Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/bonding/bond_debugfs.c | 2
-rw-r--r-- drivers/net/can/dev/bittiming.c | 120
-rw-r--r-- drivers/net/can/dev/calc_bittiming.c | 34
-rw-r--r-- drivers/net/can/dev/dev.c | 21
-rw-r--r-- drivers/net/can/dev/netlink.c | 49
-rw-r--r-- drivers/net/can/rcar/rcar_canfd.c | 225
-rw-r--r-- drivers/net/can/sja1000/ems_pci.c | 154
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c | 1
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c | 18
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 26
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb.c | 44
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_core.c | 122
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_core.h | 12
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 68
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 30
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_pro.h | 1
-rw-r--r-- drivers/net/dsa/microchip/Kconfig | 3
-rw-r--r-- drivers/net/dsa/mt7530.c | 26
-rw-r--r-- drivers/net/dsa/ocelot/Kconfig | 20
-rw-r--r-- drivers/net/dsa/ocelot/Makefile | 2
-rw-r--r-- drivers/net/dsa/ocelot/felix.c | 25
-rw-r--r-- drivers/net/dsa/ocelot/felix.h | 2
-rw-r--r-- drivers/net/dsa/ocelot/felix_vsc9959.c | 1
-rw-r--r-- drivers/net/dsa/ocelot/ocelot_ext.c | 163
-rw-r--r-- drivers/net/dsa/ocelot/seville_vsc9953.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 38
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 188
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 5
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 22
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 474
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 51
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 8
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 31
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h | 2
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_tc.c | 21
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 173
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.h | 11
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 228
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_hw.h | 25
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 14
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 27
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 29
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 3
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c | 1
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 29
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 7
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 5
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 1
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 20
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 25
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devlink.c | 104
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_eswitch.c | 26
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 30
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_fltr.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_idc.c | 53
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 989
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 50
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 1006
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.c | 133
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 183
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 12
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib_private.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 21
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 16
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 24
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 8
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 23
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 53
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ptp.c | 14
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 35
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c | 18
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 30
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe.c | 3
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe.h | 1
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_sgmii.c | 46
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 82
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ecpf.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 138
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 208
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/events.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 755
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 62
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 244
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 5
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/Makefile | 2
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 4
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 29
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tc.c | 3
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c | 174
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c | 94
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c | 29
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/Makefile | 3
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.c | 6
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.h | 124
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h | 808
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_police.c | 53
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_pool.c | 81
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c | 332
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c | 7
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_qos.c | 59
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c | 335
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_tc.c | 1
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c | 787
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c | 1166
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c | 117
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c | 786
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h | 34
-rw-r--r-- drivers/net/ethernet/microchip/vcap/Makefile | 2
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_ag_api.h | 11
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api.c | 39
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api.h | 1
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_client.h | 5
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c | 6
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c | 4
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c | 66
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c | 14
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_tc.c | 409
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_tc.h | 31
-rw-r--r-- drivers/net/ethernet/microsoft/mana/gdma_main.c | 37
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 48
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_devlink.c | 31
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_flower.c | 24
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_ptp.c | 8
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 190
-rw-r--r-- drivers/net/ethernet/mscc/vsc7514_regs.c | 159
-rw-r--r-- drivers/net/ethernet/netronome/nfp/Makefile | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/crypto/ipsec.c | 9
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 24
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfd3/dp.c | 11
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfdk/dp.c | 49
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c | 17
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 206
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_port.h | 12
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nic/main.c | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nic/main.h | 2
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.c | 9
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.h | 12
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 68
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.h | 2
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_main.c | 29
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 87
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.c | 231
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.h | 4
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 20
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-qos.c | 22
-rw-r--r-- drivers/net/ethernet/ti/cpsw_priv.c | 1
-rw-r--r-- drivers/net/ethernet/wangxun/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/Makefile | 2
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 675
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.h | 5
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 2004
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_lib.h | 32
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_type.h | 314
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 249
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 18
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 271
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 21
-rw-r--r-- drivers/net/hyperv/netvsc.c | 11
-rw-r--r-- drivers/net/ipa/gsi.c | 82
-rw-r--r-- drivers/net/ipa/gsi_reg.h | 109
-rw-r--r-- drivers/net/ipa/ipa.h | 4
-rw-r--r-- drivers/net/ipa/ipa_cmd.c | 38
-rw-r--r-- drivers/net/ipa/ipa_endpoint.c | 307
-rw-r--r-- drivers/net/ipa/ipa_endpoint.h | 4
-rw-r--r-- drivers/net/ipa/ipa_interrupt.c | 39
-rw-r--r-- drivers/net/ipa/ipa_main.c | 122
-rw-r--r-- drivers/net/ipa/ipa_mem.c | 22
-rw-r--r-- drivers/net/ipa/ipa_mem.h | 8
-rw-r--r-- drivers/net/ipa/ipa_reg.c | 90
-rw-r--r-- drivers/net/ipa/ipa_reg.h | 180
-rw-r--r-- drivers/net/ipa/ipa_resource.c | 16
-rw-r--r-- drivers/net/ipa/ipa_table.c | 68
-rw-r--r-- drivers/net/ipa/ipa_uc.c | 6
-rw-r--r-- drivers/net/ipa/ipa_version.h | 6
-rw-r--r-- drivers/net/ipa/reg.h | 133
-rw-r--r-- drivers/net/ipa/reg/ipa_reg-v3.1.c | 283
-rw-r--r-- drivers/net/ipa/reg/ipa_reg-v3.5.1.c | 269
-rw-r--r-- drivers/net/ipa/reg/ipa_reg-v4.11.c | 271
-rw-r--r-- drivers/net/ipa/reg/ipa_reg-v4.2.c | 255
-rw-r--r-- drivers/net/ipa/reg/ipa_reg-v4.5.c | 287
-rw-r--r-- drivers/net/ipa/reg/ipa_reg-v4.7.c | 271
-rw-r--r-- drivers/net/ipa/reg/ipa_reg-v4.9.c | 271
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 2
-rw-r--r-- drivers/net/mdio/Kconfig | 11
-rw-r--r-- drivers/net/mdio/Makefile | 1
-rw-r--r-- drivers/net/mdio/mdio-mux-meson-g12a.c | 11
-rw-r--r-- drivers/net/mdio/mdio-mux-meson-gxl.c | 164
-rw-r--r-- drivers/net/netdevsim/dev.c | 1
-rw-r--r-- drivers/net/pcs/pcs-rzn1-miic.c | 6
-rw-r--r-- drivers/net/phy/Kconfig | 2
-rw-r--r-- drivers/net/phy/dp83822.c | 6
-rw-r--r-- drivers/net/phy/meson-gxl.c | 4
-rw-r--r-- drivers/net/phy/micrel.c | 194
-rw-r--r-- drivers/net/phy/motorcomm.c | 559
-rw-r--r-- drivers/net/phy/phy_device.c | 2
-rw-r--r-- drivers/net/phy/phylink.c | 5
-rw-r--r-- drivers/net/tap.c | 2
-rw-r--r-- drivers/net/tun.c | 2
-rw-r--r-- drivers/net/usb/plusb.c | 4
-rw-r--r-- drivers/net/virtio_net.c | 45
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 11
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 29
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_netdev.c | 16
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_pci.c | 2
283 files changed, 16989 insertions, 5560 deletions
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 4f9b4a18c74c..594094526648 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
d = debugfs_rename(bonding_debug_root, bond->debug_dir,
bonding_debug_root, bond->dev->name);
- if (d) {
+ if (!IS_ERR(d)) {
bond->debug_dir = d;
} else {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 7ae80763c960..0b93900b1dfa 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -6,25 +6,81 @@
#include <linux/can/dev.h>
+void can_sjw_set_default(struct can_bittiming *bt)
+{
+ if (bt->sjw)
+ return;
+
+ /* If user space provides no sjw, use sane default of phase_seg2 / 2 */
+ bt->sjw = max(1U, min(bt->phase_seg1, bt->phase_seg2 / 2));
+}
+
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
+{
+ if (bt->sjw > btc->sjw_max) {
+ NL_SET_ERR_MSG_FMT(extack, "sjw: %u greater than max sjw: %u",
+ bt->sjw, btc->sjw_max);
+ return -EINVAL;
+ }
+
+ if (bt->sjw > bt->phase_seg1) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg1: %u",
+ bt->sjw, bt->phase_seg1);
+ return -EINVAL;
+ }
+
+ if (bt->sjw > bt->phase_seg2) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg2: %u",
+ bt->sjw, bt->phase_seg2);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* Checks the validity of the specified bit-timing parameters prop_seg,
* phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+ const struct can_bittiming_const *btc,
+ struct netlink_ext_ack *extack)
{
+ const unsigned int tseg1 = bt->prop_seg + bt->phase_seg1;
const struct can_priv *priv = netdev_priv(dev);
- unsigned int tseg1, alltseg;
u64 brp64;
+ int err;
- tseg1 = bt->prop_seg + bt->phase_seg1;
- if (!bt->sjw)
- bt->sjw = 1;
- if (bt->sjw > btc->sjw_max ||
- tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
- bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
- return -ERANGE;
+ if (tseg1 < btc->tseg1_min) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u less than tseg1-min: %u",
+ tseg1, btc->tseg1_min);
+ return -EINVAL;
+ }
+ if (tseg1 > btc->tseg1_max) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u greater than tseg1-max: %u",
+ tseg1, btc->tseg1_max);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 < btc->tseg2_min) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u less than tseg2-min: %u",
+ bt->phase_seg2, btc->tseg2_min);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 > btc->tseg2_max) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u greater than tseg2-max: %u",
+ bt->phase_seg2, btc->tseg2_max);
+ return -EINVAL;
+ }
+
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
brp64 = (u64)priv->clock.freq * (u64)bt->tq;
if (btc->brp_inc > 1)
@@ -35,12 +91,21 @@ static int can_fixup_bittiming(const struct net_device *dev, struct can_bittimin
brp64 *= btc->brp_inc;
bt->brp = (u32)brp64;
- if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+ if (bt->brp < btc->brp_min) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u less than brp-min: %u",
+ bt->brp, btc->brp_min);
+ return -EINVAL;
+ }
+ if (bt->brp > btc->brp_max) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u greater than brp-max: %u",
+ bt->brp, btc->brp_max);
return -EINVAL;
+ }
- alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
- bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
- bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+ bt->bitrate = priv->clock.freq / (bt->brp * can_bit_time(bt));
+ bt->sample_point = ((CAN_SYNC_SEG + tseg1) * 1000) / can_bit_time(bt);
+ bt->tq = DIV_U64_ROUND_CLOSEST(mul_u32_u32(bt->brp, NSEC_PER_SEC),
+ priv->clock.freq);
return 0;
}
@@ -49,7 +114,8 @@ static int can_fixup_bittiming(const struct net_device *dev, struct can_bittimin
static int
can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
{
unsigned int i;
@@ -58,30 +124,30 @@ can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *b
return 0;
}
+ NL_SET_ERR_MSG_FMT(extack, "bitrate %u bps not supported",
+ bt->bitrate);
+
return -EINVAL;
}
int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
{
- int err;
-
/* Depending on the given can_bittiming parameter structure the CAN
* timing parameters are calculated based on the provided bitrate OR
* alternatively the CAN timing parameters (tq, prop_seg, etc.) are
* provided directly which are then checked and fixed up.
*/
if (!bt->tq && bt->bitrate && btc)
- err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate && btc)
- err = can_fixup_bittiming(dev, bt, btc);
- else if (!bt->tq && bt->bitrate && bitrate_const)
- err = can_validate_bitrate(dev, bt, bitrate_const,
- bitrate_const_cnt);
- else
- err = -EINVAL;
-
- return err;
+ return can_calc_bittiming(dev, bt, btc, extack);
+ if (bt->tq && !bt->bitrate && btc)
+ return can_fixup_bittiming(dev, bt, btc, extack);
+ if (!bt->tq && bt->bitrate && bitrate_const)
+ return can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt, extack);
+
+ return -EINVAL;
}
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index d3caa040614d..3809c148fb88 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -63,7 +63,7 @@ can_update_sample_point(const struct can_bittiming_const *btc,
}
int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
unsigned int bitrate; /* current bitrate */
@@ -76,6 +76,7 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
unsigned int best_brp = 0; /* current best value for brp */
unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
u64 v64;
+ int err;
/* Use CiA recommended sample points */
if (bt->sample_point) {
@@ -133,13 +134,14 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
do_div(v64, bt->bitrate);
bitrate_error = (u32)v64;
if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%u%% too high",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EINVAL;
}
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%u%%",
+ bitrate_error / 10, bitrate_error % 10);
}
/* real sample point */
@@ -154,23 +156,17 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
bt->phase_seg1 = tseg1 - bt->prop_seg;
bt->phase_seg2 = tseg2;
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
- }
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
bt->brp = best_brp;
/* real bitrate */
bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+ (bt->brp * can_bit_time(bt));
return 0;
}
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index c1956b1e9faf..7f9334a8af50 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -498,6 +498,18 @@ static int can_get_termination(struct net_device *ndev)
return 0;
}
+static bool
+can_bittiming_const_valid(const struct can_bittiming_const *btc)
+{
+ if (!btc)
+ return true;
+
+ if (!btc->sjw_max)
+ return false;
+
+ return true;
+}
+
/* Register the CAN network device */
int register_candev(struct net_device *dev)
{
@@ -518,6 +530,15 @@ int register_candev(struct net_device *dev)
if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
return -EINVAL;
+ /* We only support either fixed bit rates or bit timing const. */
+ if ((priv->bitrate_const || priv->data_bitrate_const) &&
+ (priv->bittiming_const || priv->data_bittiming_const))
+ return -EINVAL;
+
+ if (!can_bittiming_const_valid(priv->bittiming_const) ||
+ !can_bittiming_const_valid(priv->data_bittiming_const))
+ return -EINVAL;
+
if (!priv->termination_const) {
err = can_get_termination(dev);
if (err)
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 8efa22d9f214..036d85ef07f5 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -36,10 +36,24 @@ static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
+static int can_validate_bittiming(const struct can_bittiming *bt,
+ struct netlink_ext_ack *extack)
+{
+ /* sample point is in one-tenth of a percent */
+ if (bt->sample_point >= 1000) {
+ NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
bool is_can_fd = false;
+ int err;
/* Make sure that valid CAN FD configurations always consist of
* - nominal/arbitration bittiming
@@ -51,6 +65,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (!data)
return 0;
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_validate_bittiming(&bt, extack);
+ if (err)
+ return err;
+ }
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
@@ -71,7 +94,6 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
*/
if (data[IFLA_CAN_TDC]) {
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
- int err;
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
data[IFLA_CAN_TDC],
@@ -102,6 +124,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
return -EOPNOTSUPP;
}
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ struct can_bittiming bt;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(bt));
+ err = can_validate_bittiming(&bt, extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -184,13 +215,15 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
err = can_get_bittiming(dev, &bt,
priv->bittiming_const,
priv->bitrate_const,
- priv->bitrate_const_cnt);
+ priv->bitrate_const_cnt,
+ extack);
if (err)
return err;
if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
+ NL_SET_ERR_MSG_FMT(extack,
+ "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
+ bt.bitrate, priv->bitrate_max);
return -EINVAL;
}
@@ -288,13 +321,15 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
err = can_get_bittiming(dev, &dbt,
priv->data_bittiming_const,
priv->data_bitrate_const,
- priv->data_bitrate_const_cnt);
+ priv->data_bitrate_const_cnt,
+ extack);
if (err)
return err;
if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
+ NL_SET_ERR_MSG_FMT(extack,
+ "CANFD data bitrate %u bps surpasses transceiver capabilities of %u bps",
+ dbt.bitrate, priv->bitrate_max);
return -EINVAL;
}
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index f6fa7157b99b..ef4e1b9a9e1e 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -21,23 +21,23 @@
* wherever it is modified to a readable name.
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/netdevice.h>
-#include <linux/platform_device.h>
-#include <linux/can/dev.h>
-#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/iopoll.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/types.h>
#define RCANFD_DRV_NAME "rcar_canfd"
@@ -82,8 +82,8 @@
#define RCANFD_GERFL_DEF BIT(0)
#define RCANFD_GERFL_ERR(gpriv, x) \
- ((x) & (reg_v3u(gpriv, RCANFD_GERFL_EEF0_7, \
- RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
+ ((x) & (reg_gen4(gpriv, RCANFD_GERFL_EEF0_7, \
+ RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
RCANFD_GERFL_MES | \
((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
@@ -91,16 +91,16 @@
/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
- (((x) & reg_v3u(gpriv, 0x1ff, 0xff)) << \
- (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8)))
+ (((x) & reg_gen4(gpriv, 0x1ff, 0xff)) << \
+ (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8)))
#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
- (((x) >> (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) & \
- reg_v3u(gpriv, 0x1ff, 0xff))
+ (((x) >> (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) & \
+ reg_gen4(gpriv, 0x1ff, 0xff))
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_v3u(gpriv, 0x7f, 0x1f))
+#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_gen4(gpriv, 0x7f, 0x1f))
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -118,13 +118,13 @@
/* RSCFDnCFDCmNCFG - CAN FD only */
#define RCANFD_NCFG_NTSEG2(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 25, 24))
+ (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 25, 24))
#define RCANFD_NCFG_NTSEG1(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0xff, 0x7f)) << reg_v3u(gpriv, 17, 16))
+ (((x) & reg_gen4(gpriv, 0xff, 0x7f)) << reg_gen4(gpriv, 17, 16))
#define RCANFD_NCFG_NSJW(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 10, 11))
+ (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 10, 11))
#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
@@ -186,19 +186,19 @@
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
+#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & reg_gen4(gpriv, 0xf, 0x7)) << 24)
#define RCANFD_DCFG_DTSEG2(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x0f, 0x7)) << reg_v3u(gpriv, 16, 20))
+ (((x) & reg_gen4(gpriv, 0x0f, 0x7)) << reg_gen4(gpriv, 16, 20))
#define RCANFD_DCFG_DTSEG1(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x1f, 0xf)) << reg_v3u(gpriv, 8, 16))
+ (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 8, 16))
#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCmFDCFG */
-#define RCANFD_FDCFG_CLOE BIT(30)
-#define RCANFD_FDCFG_FDOE BIT(28)
+#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
+#define RCANFD_GEN4_FDCFG_FDOE BIT(28)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
@@ -233,10 +233,11 @@
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(gpriv, x) (((x) & 0xf) << reg_v3u(gpriv, 16, 20))
-#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_v3u(gpriv, 8, 16))
+#define RCANFD_CFCC_CFTML(gpriv, x) \
+ (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 16, 20))
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_gen4(gpriv, 8, 16))
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_v3u(gpriv, 21, 8))
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_gen4(gpriv, 21, 8))
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -304,7 +305,7 @@
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(gpriv, x) (reg_v3u(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) (reg_gen4(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
@@ -314,13 +315,13 @@
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
#define RCANFD_CFCC(gpriv, ch, idx) \
- (reg_v3u(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
+ (reg_gen4(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
#define RCANFD_CFSTS(gpriv, ch, idx) \
- (reg_v3u(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
+ (reg_gen4(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
#define RCANFD_CFPCTR(gpriv, ch, idx) \
- (reg_v3u(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
+ (reg_gen4(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDFESTS / RSCFDnFESTS */
#define RCANFD_FESTS (0x0238)
@@ -428,16 +429,15 @@
/* RSCFDnRPGACCr */
#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
-/* R-Car V3U Classical and CAN FD mode specific register map */
-#define RCANFD_V3U_CFDCFG (0x1314)
-#define RCANFD_V3U_DCFG(m) (0x1400 + (0x20 * (m)))
+/* R-Car Gen4 Classical and CAN FD mode specific register map */
+#define RCANFD_GEN4_FDCFG(m) (0x1404 + (0x20 * (m)))
-#define RCANFD_V3U_GAFL_OFFSET (0x1800)
+#define RCANFD_GEN4_GAFL_OFFSET (0x1800)
/* CAN FD mode specific register map */
/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m)))
+#define RCANFD_F_DCFG(gpriv, m) (reg_gen4(gpriv, 0x1400, 0x0500) + (0x20 * (m)))
#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
@@ -453,7 +453,7 @@
#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET(gpriv) reg_v3u(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFOFFSET(gpriv) reg_gen4(gpriv, 0x6000, 0x3000)
#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
@@ -461,7 +461,7 @@
(RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET(gpriv) reg_v3u(gpriv, 0x6400, 0x3400)
+#define RCANFD_F_CFOFFSET(gpriv) reg_gen4(gpriv, 0x6400, 0x3400)
#define RCANFD_F_CFID(gpriv, ch, idx) \
(RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
@@ -597,28 +597,28 @@ static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
.shared_global_irqs = 1,
};
+static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
+ .max_channels = 8,
+ .postdiv = 2,
+ .shared_global_irqs = 1,
+};
+
static const struct rcar_canfd_hw_info rzg2l_hw_info = {
.max_channels = 2,
.postdiv = 1,
.multi_channel_irqs = 1,
};
-static const struct rcar_canfd_hw_info r8a779a0_hw_info = {
- .max_channels = 8,
- .postdiv = 2,
- .shared_global_irqs = 1,
-};
-
/* Helper functions */
-static inline bool is_v3u(struct rcar_canfd_global *gpriv)
+static inline bool is_gen4(struct rcar_canfd_global *gpriv)
{
- return gpriv->info == &r8a779a0_hw_info;
+ return gpriv->info == &rcar_gen4_hw_info;
}
-static inline u32 reg_v3u(struct rcar_canfd_global *gpriv,
- u32 v3u, u32 not_v3u)
+static inline u32 reg_gen4(struct rcar_canfd_global *gpriv,
+ u32 gen4, u32 not_gen4)
{
- return is_v3u(gpriv) ? v3u : not_v3u;
+ return is_gen4(gpriv) ? gen4 : not_gen4;
}
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
@@ -688,13 +688,14 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
{
- if (is_v3u(gpriv)) {
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
- RCANFD_FDCFG_FDOE);
- else
- rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
- RCANFD_FDCFG_CLOE);
+ if (is_gen4(gpriv)) {
+ u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
+ : RCANFD_GEN4_FDCFG_CLOE;
+
+ for_each_set_bit(ch, &gpriv->channels_mask,
+ gpriv->info->max_channels)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GEN4_FDCFG(ch),
+ val);
} else {
if (gpriv->fdmode)
rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
@@ -814,8 +815,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
/* Write number of rules for channel */
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
- if (is_v3u(gpriv))
- offset = RCANFD_V3U_GAFL_OFFSET;
+ if (is_gen4(gpriv))
+ offset = RCANFD_GEN4_GAFL_OFFSET;
else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
else
@@ -1343,17 +1344,14 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
tseg2 = dbt->phase_seg2 - 1;
cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
+ RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
- if (is_v3u(gpriv))
- rcar_canfd_write(priv->base, RCANFD_V3U_DCFG(ch), cfg);
- else
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
+ rcar_canfd_write(priv->base, RCANFD_F_DCFG(gpriv, ch), cfg);
netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
brp, sjw, tseg1, tseg2);
} else {
/* Classical CAN only mode */
- if (is_v3u(gpriv)) {
+ if (is_gen4(gpriv)) {
cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
RCANFD_NCFG_NBRP(brp) |
RCANFD_NCFG_NSJW(gpriv, sjw) |
@@ -1510,7 +1508,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
rcar_canfd_write(priv->base,
RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
@@ -1569,7 +1567,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
@@ -1620,7 +1618,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
- else if (is_v3u(gpriv))
+ else if (is_gen4(gpriv))
rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
@@ -1717,13 +1715,14 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
{
const struct rcar_canfd_hw_info *info = gpriv->info;
struct platform_device *pdev = gpriv->pdev;
+ struct device *dev = &pdev->dev;
struct rcar_canfd_channel *priv;
struct net_device *ndev;
int err = -ENODEV;
ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH);
if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev() failed\n");
+ dev_err(dev, "alloc_candev() failed\n");
return -ENOMEM;
}
priv = netdev_priv(ndev);
@@ -1736,7 +1735,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->channel = ch;
priv->gpriv = gpriv;
priv->can.clock.freq = fcan_freq;
- dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
+ dev_info(dev, "can_clk rate is %u\n", priv->can.clock.freq);
if (info->multi_channel_irqs) {
char *irq_name;
@@ -1755,31 +1754,31 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
goto fail;
}
- irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "canfd.ch%d_err", ch);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_err",
+ ch);
if (!irq_name) {
err = -ENOMEM;
goto fail;
}
- err = devm_request_irq(&pdev->dev, err_irq,
+ err = devm_request_irq(dev, err_irq,
rcar_canfd_channel_err_interrupt, 0,
irq_name, priv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq CH Err(%d) failed, error %d\n",
err_irq, err);
goto fail;
}
- irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "canfd.ch%d_trx", ch);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_trx",
+ ch);
if (!irq_name) {
err = -ENOMEM;
goto fail;
}
- err = devm_request_irq(&pdev->dev, tx_irq,
+ err = devm_request_irq(dev, tx_irq,
rcar_canfd_channel_tx_interrupt, 0,
irq_name, priv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq Tx (%d) failed, error %d\n",
tx_irq, err);
goto fail;
}
@@ -1803,7 +1802,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->can.do_set_mode = rcar_canfd_do_set_mode;
priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll,
RCANFD_NAPI_WEIGHT);
@@ -1811,11 +1810,10 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
gpriv->ch[priv->channel] = priv;
err = register_candev(ndev);
if (err) {
- dev_err(&pdev->dev,
- "register_candev() failed, error %d\n", err);
+ dev_err(dev, "register_candev() failed, error %d\n", err);
goto fail_candev;
}
- dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
+ dev_info(dev, "device registered (channel %u)\n", priv->channel);
return 0;
fail_candev:
@@ -1839,6 +1837,7 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
static int rcar_canfd_probe(struct platform_device *pdev)
{
const struct rcar_canfd_hw_info *info;
+ struct device *dev = &pdev->dev;
void __iomem *addr;
u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
@@ -1850,14 +1849,14 @@ static int rcar_canfd_probe(struct platform_device *pdev)
char name[9] = "channelX";
int i;
- info = of_device_get_match_data(&pdev->dev);
+ info = of_device_get_match_data(dev);
- if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
+ if (of_property_read_bool(dev->of_node, "renesas,no-can-fd"))
fdmode = false; /* Classical CAN only mode */
for (i = 0; i < info->max_channels; ++i) {
name[7] = '0' + i;
- of_child = of_get_child_by_name(pdev->dev.of_node, name);
+ of_child = of_get_child_by_name(dev->of_node, name);
if (of_child && of_device_is_available(of_child))
channels_mask |= BIT(i);
of_node_put(of_child);
@@ -1890,7 +1889,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
/* Global controller context */
- gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
+ gpriv = devm_kzalloc(dev, sizeof(*gpriv), GFP_KERNEL);
if (!gpriv)
return -ENOMEM;
@@ -1899,32 +1898,30 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->fdmode = fdmode;
gpriv->info = info;
- gpriv->rstc1 = devm_reset_control_get_optional_exclusive(&pdev->dev,
- "rstp_n");
+ gpriv->rstc1 = devm_reset_control_get_optional_exclusive(dev, "rstp_n");
if (IS_ERR(gpriv->rstc1))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc1),
+ return dev_err_probe(dev, PTR_ERR(gpriv->rstc1),
"failed to get rstp_n\n");
- gpriv->rstc2 = devm_reset_control_get_optional_exclusive(&pdev->dev,
- "rstc_n");
+ gpriv->rstc2 = devm_reset_control_get_optional_exclusive(dev, "rstc_n");
if (IS_ERR(gpriv->rstc2))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc2),
+ return dev_err_probe(dev, PTR_ERR(gpriv->rstc2),
"failed to get rstc_n\n");
/* Peripheral clock */
- gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
+ gpriv->clkp = devm_clk_get(dev, "fck");
if (IS_ERR(gpriv->clkp))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->clkp),
+ return dev_err_probe(dev, PTR_ERR(gpriv->clkp),
"cannot get peripheral clock\n");
/* fCAN clock: Pick External clock. If not available fallback to
* CANFD clock
*/
- gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
+ gpriv->can_clk = devm_clk_get(dev, "can_clk");
if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
- gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
+ gpriv->can_clk = devm_clk_get(dev, "canfd");
if (IS_ERR(gpriv->can_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->can_clk),
+ return dev_err_probe(dev, PTR_ERR(gpriv->can_clk),
"cannot get canfd clock\n");
gpriv->fcan = RCANFD_CANFDCLK;
@@ -1947,39 +1944,38 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Request IRQ that's common for both channels */
if (info->shared_global_irqs) {
- err = devm_request_irq(&pdev->dev, ch_irq,
+ err = devm_request_irq(dev, ch_irq,
rcar_canfd_channel_interrupt, 0,
"canfd.ch_int", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
ch_irq, err);
goto fail_dev;
}
- err = devm_request_irq(&pdev->dev, g_irq,
- rcar_canfd_global_interrupt, 0,
- "canfd.g_int", gpriv);
+ err = devm_request_irq(dev, g_irq, rcar_canfd_global_interrupt,
+ 0, "canfd.g_int", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
g_irq, err);
goto fail_dev;
}
} else {
- err = devm_request_irq(&pdev->dev, g_recc_irq,
+ err = devm_request_irq(dev, g_recc_irq,
rcar_canfd_global_receive_fifo_interrupt, 0,
"canfd.g_recc", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
g_recc_irq, err);
goto fail_dev;
}
- err = devm_request_irq(&pdev->dev, g_err_irq,
+ err = devm_request_irq(dev, g_err_irq,
rcar_canfd_global_err_interrupt, 0,
"canfd.g_err", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
g_err_irq, err);
goto fail_dev;
}
@@ -1997,14 +1993,14 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Enable peripheral clock for register access */
err = clk_prepare_enable(gpriv->clkp);
if (err) {
- dev_err(&pdev->dev,
- "failed to enable peripheral clock, error %d\n", err);
+ dev_err(dev, "failed to enable peripheral clock, error %d\n",
+ err);
goto fail_reset;
}
err = rcar_canfd_reset_controller(gpriv);
if (err) {
- dev_err(&pdev->dev, "reset controller failed\n");
+ dev_err(dev, "reset controller failed\n");
goto fail_clk;
}
@@ -2034,7 +2030,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
!(sts & RCANFD_GSTS_GNOPM), 2, 500000);
if (err) {
- dev_err(&pdev->dev, "global operational mode failed\n");
+ dev_err(dev, "global operational mode failed\n");
goto fail_mode;
}
@@ -2045,7 +2041,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, gpriv);
- dev_info(&pdev->dev, "global operational state (clk %d, fdmode %d)\n",
+ dev_info(dev, "global operational state (clk %d, fdmode %d)\n",
gpriv->fcan, gpriv->fdmode);
return 0;
@@ -2099,9 +2095,10 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
rcar_canfd_resume);
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
+ { .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
+ { .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
- { .compatible = "renesas,r8a779a0-canfd", .data = &r8a779a0_hw_info },
{ }
};
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 4ab91759a5c6..c56e27223e5f 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -3,6 +3,7 @@
* Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
* Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ * Copyright (C) 2023 EMS Dr. Thomas Wuensche
*/
#include <linux/kernel.h>
@@ -19,12 +20,14 @@
#define DRV_NAME "ems_pci"
-MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>");
+MODULE_AUTHOR("Sebastian Haas <support@ems-wuensche.com>");
+MODULE_AUTHOR("Gerhard Uttenthaler <uttenthaler@ems-wuensche.com>");
MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards");
MODULE_LICENSE("GPL v2");
#define EMS_PCI_V1_MAX_CHAN 2
#define EMS_PCI_V2_MAX_CHAN 4
+#define EMS_PCI_V3_MAX_CHAN 4
#define EMS_PCI_MAX_CHAN EMS_PCI_V2_MAX_CHAN
struct ems_pci_card {
@@ -40,8 +43,7 @@ struct ems_pci_card {
#define EMS_PCI_CAN_CLOCK (16000000 / 2)
-/*
- * Register definitions and descriptions are from LinCAN 0.3.3.
+/* Register definitions and descriptions are from LinCAN 0.3.3.
*
* PSB4610 PITA-2 bridge control registers
*/
@@ -52,8 +54,7 @@ struct ems_pci_card {
#define PITA2_MISC 0x1c /* Miscellaneous Register */
#define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */
-/*
- * Register definitions for the PLX 9030
+/* Register definitions for the PLX 9030
*/
#define PLX_ICSR 0x4c /* Interrupt Control/Status register */
#define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */
@@ -62,8 +63,16 @@ struct ems_pci_card {
#define PLX_ICSR_ENA_CLR (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \
PLX_ICSR_LINTI1_CLR)
-/*
- * The board configuration is probably following:
+/* Register definitions for the ASIX99100
+ */
+#define ASIX_LINTSR 0x28 /* Interrupt Control/Status register */
+#define ASIX_LINTSR_INT0AC BIT(0) /* Writing 1 enables or clears interrupt */
+
+#define ASIX_LIEMR 0x24 /* Local Interrupt Enable / Miscellaneous Register */
+#define ASIX_LIEMR_L0EINTEN BIT(16) /* Local INT0 input assertion enable */
+#define ASIX_LIEMR_LRST BIT(14) /* Local Reset assert */
+
+/* The board configuration is probably following:
* RX1 is connected to ground.
* TX1 is not connected.
* CLKO is not connected.
@@ -72,23 +81,40 @@ struct ems_pci_card {
*/
#define EMS_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
-/*
- * In the CDR register, you should set CBP to 1.
+/* In the CDR register, you should set CBP to 1.
* You will probably also want to set the clock divider value to 7
* (meaning direct oscillator output) because the second SJA1000 chip
* is driven by the first one CLKOUT output.
*/
#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
-#define EMS_PCI_V1_BASE_BAR 1
-#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
-#define EMS_PCI_V2_BASE_BAR 2
-#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
-#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */
-#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+#define EMS_PCI_V1_BASE_BAR 1
+#define EMS_PCI_V1_CONF_BAR 0
+#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
+#define EMS_PCI_V1_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_V1_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_V2_BASE_BAR 2
+#define EMS_PCI_V2_CONF_BAR 0
+#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
+#define EMS_PCI_V2_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_V2_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_V3_BASE_BAR 0
+#define EMS_PCI_V3_CONF_BAR 5
+#define EMS_PCI_V3_CONF_SIZE 128 /* size of ASIX control area */
+#define EMS_PCI_V3_CAN_BASE_OFFSET 0x00 /* offset where the controllers start */
+#define EMS_PCI_V3_CAN_CTRL_SIZE 0x100 /* memory size for each controller */
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
+#ifndef PCI_VENDOR_ID_ASIX
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_9110 0x9110
+#define PCI_SUBVENDOR_ID_ASIX 0xa000
+#endif
+#define PCI_SUBDEVICE_ID_EMS 0x4010
+
static const struct pci_device_id ems_pci_tbl[] = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
@@ -96,12 +122,13 @@ static const struct pci_device_id ems_pci_tbl[] = {
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000},
/* CPC-104P v2 */
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002},
+ /* CPC-PCIe v3 */
+ {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_9110, PCI_SUBVENDOR_ID_ASIX, PCI_SUBDEVICE_ID_EMS},
{0,}
};
MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
-/*
- * Helper to read internal registers from card logic (not CAN)
+/* Helper to read internal registers from card logic (not CAN)
*/
static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port)
{
@@ -146,8 +173,25 @@ static void ems_pci_v2_post_irq(const struct sja1000_priv *priv)
writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR);
}
-/*
- * Check if a CAN controller is present at the specified location
+static u8 ems_pci_v3_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + port);
+}
+
+static void ems_pci_v3_write_reg(const struct sja1000_priv *priv,
+ int port, u8 val)
+{
+ writeb(val, priv->reg_base + port);
+}
+
+static void ems_pci_v3_post_irq(const struct sja1000_priv *priv)
+{
+ struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
+
+ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
+}
+
+/* Check if a CAN controller is present at the specified location
* by trying to set 'em into the PeliCAN mode
*/
static inline int ems_pci_check_chan(const struct sja1000_priv *priv)
@@ -185,10 +229,10 @@ static void ems_pci_del_card(struct pci_dev *pdev)
free_sja1000dev(dev);
}
- if (card->base_addr != NULL)
+ if (card->base_addr)
pci_iounmap(card->pci_dev, card->base_addr);
- if (card->conf_addr != NULL)
+ if (card->conf_addr)
pci_iounmap(card->pci_dev, card->conf_addr);
kfree(card);
@@ -202,8 +246,7 @@ static void ems_pci_card_reset(struct ems_pci_card *card)
writeb(0, card->base_addr);
}
-/*
- * Probe PCI device for EMS CAN signature and register each available
+/* Probe PCI device for EMS CAN signature and register each available
* CAN channel to SJA1000 Socket-CAN subsystem.
*/
static int ems_pci_add_card(struct pci_dev *pdev,
@@ -212,7 +255,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
struct sja1000_priv *priv;
struct net_device *dev;
struct ems_pci_card *card;
- int max_chan, conf_size, base_bar;
+ int max_chan, conf_size, base_bar, conf_bar;
int err, i;
/* Enabling PCI device */
@@ -222,8 +265,8 @@ static int ems_pci_add_card(struct pci_dev *pdev,
}
/* Allocating card structures to hold addresses, ... */
- card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL);
- if (card == NULL) {
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
pci_disable_device(pdev);
return -ENOMEM;
}
@@ -234,27 +277,35 @@ static int ems_pci_add_card(struct pci_dev *pdev,
card->channels = 0;
- if (pdev->vendor == PCI_VENDOR_ID_PLX) {
+ if (pdev->vendor == PCI_VENDOR_ID_ASIX) {
+ card->version = 3; /* CPC-PCIe v3 */
+ max_chan = EMS_PCI_V3_MAX_CHAN;
+ base_bar = EMS_PCI_V3_BASE_BAR;
+ conf_bar = EMS_PCI_V3_CONF_BAR;
+ conf_size = EMS_PCI_V3_CONF_SIZE;
+ } else if (pdev->vendor == PCI_VENDOR_ID_PLX) {
card->version = 2; /* CPC-PCI v2 */
max_chan = EMS_PCI_V2_MAX_CHAN;
base_bar = EMS_PCI_V2_BASE_BAR;
+ conf_bar = EMS_PCI_V2_CONF_BAR;
conf_size = EMS_PCI_V2_CONF_SIZE;
} else {
card->version = 1; /* CPC-PCI v1 */
max_chan = EMS_PCI_V1_MAX_CHAN;
base_bar = EMS_PCI_V1_BASE_BAR;
+ conf_bar = EMS_PCI_V1_CONF_BAR;
conf_size = EMS_PCI_V1_CONF_SIZE;
}
/* Remap configuration space and controller memory area */
- card->conf_addr = pci_iomap(pdev, 0, conf_size);
- if (card->conf_addr == NULL) {
+ card->conf_addr = pci_iomap(pdev, conf_bar, conf_size);
+ if (!card->conf_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
- if (card->base_addr == NULL) {
+ if (!card->base_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
@@ -276,12 +327,20 @@ static int ems_pci_add_card(struct pci_dev *pdev,
}
}
+ if (card->version == 3) {
+ /* The ASIX chip holds the CAN controllers in local reset
+ * after bootup until the reset is deasserted here
+ */
+ writel(readl(card->conf_addr + ASIX_LIEMR) & ~ASIX_LIEMR_LRST,
+ card->conf_addr + ASIX_LIEMR);
+ }
+
ems_pci_card_reset(card);
/* Detect available channels */
for (i = 0; i < max_chan; i++) {
dev = alloc_sja1000dev(0);
- if (dev == NULL) {
+ if (!dev) {
err = -ENOMEM;
goto failure_cleanup;
}
@@ -292,16 +351,25 @@ static int ems_pci_add_card(struct pci_dev *pdev,
priv->irq_flags = IRQF_SHARED;
dev->irq = pdev->irq;
- priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET
- + (i * EMS_PCI_CAN_CTRL_SIZE);
+
if (card->version == 1) {
priv->read_reg = ems_pci_v1_read_reg;
priv->write_reg = ems_pci_v1_write_reg;
priv->post_irq = ems_pci_v1_post_irq;
- } else {
+ priv->reg_base = card->base_addr + EMS_PCI_V1_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V1_CAN_CTRL_SIZE);
+ } else if (card->version == 2) {
priv->read_reg = ems_pci_v2_read_reg;
priv->write_reg = ems_pci_v2_write_reg;
priv->post_irq = ems_pci_v2_post_irq;
+ priv->reg_base = card->base_addr + EMS_PCI_V2_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V2_CAN_CTRL_SIZE);
+ } else {
+ priv->read_reg = ems_pci_v3_read_reg;
+ priv->write_reg = ems_pci_v3_write_reg;
+ priv->post_irq = ems_pci_v3_post_irq;
+ priv->reg_base = card->base_addr + EMS_PCI_V3_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V3_CAN_CTRL_SIZE);
}
/* Check if channel is present */
@@ -313,20 +381,28 @@ static int ems_pci_add_card(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
dev->dev_id = i;
- if (card->version == 1)
+ if (card->version == 1) {
/* reset int flag of pita */
writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
card->conf_addr + PITA2_ICR);
- else
+ } else if (card->version == 2) {
/* enable IRQ in PLX 9030 */
writel(PLX_ICSR_ENA_CLR,
card->conf_addr + PLX_ICSR);
+ } else {
+ /* Enable IRQ in AX99100 */
+ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
+ /* Enable local INT0 input enable */
+ writel(readl(card->conf_addr + ASIX_LIEMR) | ASIX_LIEMR_L0EINTEN,
+ card->conf_addr + ASIX_LIEMR);
+ }
/* Register SJA1000 device */
err = register_sja1000dev(dev);
if (err) {
- dev_err(&pdev->dev, "Registering device failed "
- "(err=%d)\n", err);
+ dev_err(&pdev->dev,
+ "Registering device failed: %pe\n",
+ ERR_PTR(err));
free_sja1000dev(dev);
goto failure_cleanup;
}
@@ -334,7 +410,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
card->channels++;
dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n",
- i + 1, priv->reg_base, dev->irq);
+ i + 1, priv->reg_base, dev->irq);
} else {
free_sja1000dev(dev);
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
index 3585f02575df..57eeb066a945 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -48,6 +48,7 @@ mcp251xfd_ring_set_ringparam(struct net_device *ndev,
priv->rx_obj_num = layout.cur_rx;
priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
return 0;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index bf3f0f150199..bfe4caa0c99d 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -30,11 +30,23 @@ mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
last_byte = mcp251xfd_last_byte_set(mask);
len = last_byte - first_byte + 1;
- data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
+ data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len);
val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
memcpy(data, &val_le32, len);
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) {
+ len += sizeof(write_reg_buf->nocrc.cmd);
+ } else if (len == 1) {
+ u16 crc;
+
+ /* CRC */
+ len += sizeof(write_reg_buf->safe.cmd);
+ crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->safe.crc);
+ } else {
u16 crc;
mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
@@ -46,8 +58,6 @@ mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
/* Total length */
len += sizeof(write_reg_buf->crc.crc);
- } else {
- len += sizeof(write_reg_buf->nocrc.cmd);
}
return len;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 2b0309fedfac..7024ff0cc2c0 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -504,6 +504,11 @@ union mcp251xfd_write_reg_buf {
u8 data[4];
__be16 crc;
} crc;
+ struct __packed {
+ struct mcp251xfd_buf_cmd cmd;
+ u8 data[1];
+ __be16 crc;
+ } safe;
} ____cacheline_aligned;
struct mcp251xfd_tx_obj {
@@ -759,6 +764,13 @@ mcp251xfd_spi_cmd_write_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd,
}
static inline void
+mcp251xfd_spi_cmd_write_safe_set_addr(struct mcp251xfd_buf_cmd *cmd,
+ u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE_CRC_SAFE | addr);
+}
+
+static inline void
mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
u16 addr, u16 len)
{
@@ -769,14 +781,20 @@ mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
static inline u8 *
mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
union mcp251xfd_write_reg_buf *write_reg_buf,
- u16 addr)
+ u16 addr, u8 len)
{
u8 *data;
if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
- mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
- addr);
- data = write_reg_buf->crc.data;
+ if (len == 1) {
+ mcp251xfd_spi_cmd_write_safe_set_addr(&write_reg_buf->safe.cmd,
+ addr);
+ data = write_reg_buf->safe.data;
+ } else {
+ mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
+ addr);
+ data = write_reg_buf->crc.data;
+ }
} else {
mcp251xfd_spi_cmd_write_nocrc(&write_reg_buf->nocrc.cmd,
addr);
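
Taken together, the two mcp251xfd hunks above select among three SPI write
framings: no CRC, the new single-byte WRITE_CRC_SAFE command, and the
multi-byte WRITE_CRC command that carries an extra length byte. A standalone
sketch of the resulting message sizes, assuming a 2-byte command word, a
1-byte length field in the CRC command and a 2-byte CRC (sizes inferred from
the structs above, not taken verbatim from the driver):

#include <stddef.h>

static size_t write_reg_msg_len(int crc_quirk, size_t data_len)
{
	if (!crc_quirk)
		return 2 + data_len;	/* plain WRITE: cmd + data */
	if (data_len == 1)
		return 2 + 1 + 2;	/* WRITE_CRC_SAFE: cmd + data + crc */
	return 3 + data_len + 2;	/* WRITE_CRC: cmd + len + data + crc */
}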
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 687dd542f7f6..b211b6e283a2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -9,10 +9,11 @@
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
#include <asm/unaligned.h>
+
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -381,23 +382,42 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
}
/*
- * read device id from device
+ * read CAN channel id from device
*/
-static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
+static int pcan_usb_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id)
{
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args);
if (err)
- netdev_err(dev->netdev, "getting device id failure: %d\n", err);
+ netdev_err(dev->netdev, "getting can channel id failure: %d\n", err);
else
- *device_id = args[0];
+ *can_ch_id = args[0];
return err;
}
+/* set a new CAN channel id in the flash memory of the device */
+static int pcan_usb_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN];
+
+ /* this kind of device supports 8-bit values only */
+ if (can_ch_id > U8_MAX)
+ return -EINVAL;
+
+ /* during the flash process the device disconnects for ~1.25 s:
+ * prohibit access while the interface is UP
+ */
+ if (dev->netdev->flags & IFF_UP)
+ return -EBUSY;
+
+ args[0] = can_ch_id;
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_DEVID, PCAN_USB_SET, args);
+}
+
/*
* update current time ref with received timestamp
*/
@@ -963,9 +983,18 @@ static int pcan_usb_set_phys_id(struct net_device *netdev,
return err;
}
+/* This device only handles an 8-bit CAN channel ID. */
+static int pcan_usb_get_eeprom_len(struct net_device *netdev)
+{
+ return sizeof(u8);
+}
+
static const struct ethtool_ops pcan_usb_ethtool_ops = {
.set_phys_id = pcan_usb_set_phys_id,
.get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = pcan_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/*
@@ -1017,7 +1046,8 @@ const struct peak_usb_adapter pcan_usb = {
.dev_init = pcan_usb_init,
.dev_set_bus = pcan_usb_write_mode,
.dev_set_bittiming = pcan_usb_set_bittiming,
- .dev_get_device_id = pcan_usb_get_device_id,
+ .dev_get_can_channel_id = pcan_usb_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_set_can_channel_id,
.dev_decode_buf = pcan_usb_decode_buf,
.dev_encode_msg = pcan_usb_encode_msg,
.dev_start = pcan_usb_start,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 1d996d3320fe..d881e1d30183 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -8,13 +8,15 @@
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
+#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/usb.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -53,6 +55,26 @@ static const struct usb_device_id peak_usb_table[] = {
MODULE_DEVICE_TABLE(usb, peak_usb_table);
+static ssize_t can_channel_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct peak_usb_device *peak_dev = netdev_priv(netdev);
+
+ return sysfs_emit(buf, "%08X\n", peak_dev->can_channel_id);
+}
+static DEVICE_ATTR_RO(can_channel_id);
+
+/* mutable to avoid cast in attribute_group */
+static struct attribute *peak_usb_sysfs_attrs[] = {
+ &dev_attr_can_channel_id.attr,
+ NULL,
+};
+
+static const struct attribute_group peak_usb_sysfs_group = {
+ .name = "peak_usb",
+ .attrs = peak_usb_sysfs_attrs,
+};
+
/*
* dump memory
*/
@@ -808,6 +830,86 @@ static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+/* CAN-USB devices generally handle 32-bit CAN channel IDs.
+ * If one doesn't, it has to override this function.
+ */
+int peak_usb_get_eeprom_len(struct net_device *netdev)
+{
+ return sizeof(u32);
+}
+
+/* Every CAN-USB device exports the dev_get_can_channel_id() operation. It is used
+ * here to fill the data buffer with the user-defined CAN channel ID.
+ */
+int peak_usb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ u32 ch_id;
+ __le32 ch_id_le;
+ int err;
+
+ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id);
+ if (err)
+ return err;
+
+ /* ethtool operates on individual bytes. The byte order of the CAN
+ * channel id in memory depends on the kernel architecture. We
+ * convert the CAN channel id back to the native byte order of the PEAK
+ * device itself to ensure that the order is consistent for all
+ * host architectures.
+ */
+ ch_id_le = cpu_to_le32(ch_id);
+ memcpy(data, (u8 *)&ch_id_le + eeprom->offset, eeprom->len);
+
+ /* update cached value */
+ dev->can_channel_id = ch_id;
+ return err;
+}
+
+/* Every CAN-USB device exports the dev_get_can_channel_id()/dev_set_can_channel_id()
+ * operations. They are used here to set the new user-defined CAN channel ID.
+ */
+int peak_usb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ u32 ch_id;
+ __le32 ch_id_le;
+ int err;
+
+ /* first, read the current user defined CAN channel ID */
+ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id);
+ if (err) {
+ netdev_err(netdev, "Failed to init CAN channel id (err %d)\n", err);
+ return err;
+ }
+
+ /* Update the value with the user-given bytes.
+ * ethtool operates on individual bytes. The byte order of the CAN
+ * channel ID in memory depends on the kernel architecture. We
+ * convert the CAN channel ID back to the native byte order of the PEAK
+ * device itself to ensure that the order is consistent for all
+ * host architectures.
+ */
+ ch_id_le = cpu_to_le32(ch_id);
+ memcpy((u8 *)&ch_id_le + eeprom->offset, data, eeprom->len);
+ ch_id = le32_to_cpu(ch_id_le);
+
+ /* flash the new value now */
+ err = dev->adapter->dev_set_can_channel_id(dev, ch_id);
+ if (err) {
+ netdev_err(netdev, "Failed to write new CAN channel id (err %d)\n",
+ err);
+ return err;
+ }
+
+ /* update cached value with the new one */
+ dev->can_channel_id = ch_id;
+
+ return 0;
+}
+
int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
info->so_timestamping =
@@ -881,6 +983,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
/* add ethtool support */
netdev->ethtool_ops = peak_usb_adapter->ethtool_ops;
+ /* register peak_usb sysfs files */
+ netdev->sysfs_groups[0] = &peak_usb_sysfs_group;
+
init_usb_anchor(&dev->rx_submitted);
init_usb_anchor(&dev->tx_submitted);
@@ -921,12 +1026,11 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
goto adap_dev_free;
}
- /* get device number early */
- if (dev->adapter->dev_get_device_id)
- dev->adapter->dev_get_device_id(dev, &dev->device_number);
+ /* get CAN channel id early */
+ dev->adapter->dev_get_can_channel_id(dev, &dev->can_channel_id);
- netdev_info(netdev, "attached to %s channel %u (device %u)\n",
- peak_usb_adapter->name, ctrl_idx, dev->device_number);
+ netdev_info(netdev, "attached to %s channel %u (device 0x%08X)\n",
+ peak_usb_adapter->name, ctrl_idx, dev->can_channel_id);
return 0;
@@ -964,7 +1068,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev->state &= ~PCAN_USB_STATE_CONNECTED;
strscpy(name, netdev->name, IFNAMSIZ);
- unregister_netdev(netdev);
+ unregister_candev(netdev);
kfree(dev->cmd_buf);
dev->next_siblings = NULL;
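
The new sysfs group above exposes the cached channel ID without touching the
device. A minimal read-side sketch, assuming a netdev named "can0" (the group
name "peak_usb" and the attribute name come straight from the code above):

#include <stdio.h>

int main(void)
{
	unsigned int id;
	FILE *f = fopen("/sys/class/net/can0/peak_usb/can_channel_id", "r");

	if (!f) {
		perror("can_channel_id");
		return 1;
	}
	if (fscanf(f, "%x", &id) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("can_channel_id=%08X\n", id);
	return 0;
}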
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index f6bdd8b3f290..980e315186cf 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -60,7 +60,8 @@ struct peak_usb_adapter {
int (*dev_set_data_bittiming)(struct peak_usb_device *dev,
struct can_bittiming *bt);
int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff);
- int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id);
+ int (*dev_get_can_channel_id)(struct peak_usb_device *dev, u32 *can_ch_id);
+ int (*dev_set_can_channel_id)(struct peak_usb_device *dev, u32 can_ch_id);
int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb);
int (*dev_encode_msg)(struct peak_usb_device *dev, struct sk_buff *skb,
u8 *obuf, size_t *size);
@@ -122,7 +123,8 @@ struct peak_usb_device {
u8 *cmd_buf;
struct usb_anchor rx_submitted;
- u32 device_number;
+ /* equivalent to the device ID in the Windows API */
+ u32 can_channel_id;
u8 device_rev;
u8 ep_msg_in;
@@ -147,4 +149,10 @@ void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+/* common 32-bit CAN channel ID ethtool management */
+int peak_usb_get_eeprom_len(struct net_device *netdev);
+int peak_usb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int peak_usb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data);
#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 2ea1500df393..4d85b29a17b7 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -4,10 +4,10 @@
*
* Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
*/
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -147,6 +147,15 @@ struct __packed pcan_ufd_ovr_msg {
u8 unused[3];
};
+#define PCAN_UFD_CMD_DEVID_SET 0x81
+
+struct __packed pcan_ufd_device_id {
+ __le16 opcode_channel;
+
+ u16 unused;
+ __le32 device_id;
+};
+
static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om)
{
return om->channel & 0xf;
@@ -234,6 +243,15 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
return err;
}
+static int pcan_usb_fd_read_fwinfo(struct peak_usb_device *dev,
+ struct pcan_ufd_fw_info *fw_info)
+{
+ return pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
+ PCAN_USBPRO_INFO_FW,
+ fw_info,
+ sizeof(*fw_info));
+}
+
/* build the commands list in the given buffer, to enter operational mode */
static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
{
@@ -434,6 +452,34 @@ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
return pcan_usb_fd_send_cmd(dev, ++cmd);
}
+/* read user CAN channel id from device */
+static int pcan_usb_fd_get_can_channel_id(struct peak_usb_device *dev,
+ u32 *can_ch_id)
+{
+ int err;
+ struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev);
+
+ err = pcan_usb_fd_read_fwinfo(dev, &usb_if->fw_info);
+ if (err)
+ return err;
+
+ *can_ch_id = le32_to_cpu(usb_if->fw_info.dev_id[dev->ctrl_idx]);
+ return err;
+}
+
+/* set a new CAN channel id in the flash memory of the device */
+static int pcan_usb_fd_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id)
+{
+ struct pcan_ufd_device_id *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
+ PCAN_UFD_CMD_DEVID_SET);
+ cmd->device_id = cpu_to_le32(can_ch_id);
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
/* handle restart but in asynchronously way
* (uses PCAN-USB Pro code to complete asynchronous request)
*/
@@ -907,10 +953,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
fw_info = &pdev->usb_if->fw_info;
- err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
- PCAN_USBPRO_INFO_FW,
- fw_info,
- sizeof(*fw_info));
+ err = pcan_usb_fd_read_fwinfo(dev, fw_info);
if (err) {
dev_err(dev->netdev->dev.parent,
"unable to read %s firmware info (err %d)\n",
@@ -972,7 +1015,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
}
pdev->usb_if->dev[dev->ctrl_idx] = dev;
- dev->device_number =
+ dev->can_channel_id =
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
/* if vendor rsp is of type 2, then it contains EP numbers to
@@ -1081,6 +1124,9 @@ static int pcan_usb_fd_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_fd_ethtool_ops = {
.set_phys_id = pcan_usb_fd_set_phys_id,
.get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = peak_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/* describes the PCAN-USB FD adapter */
@@ -1148,6 +1194,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1222,6 +1270,8 @@ const struct peak_usb_adapter pcan_usb_chip = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1296,6 +1346,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1370,6 +1422,8 @@ const struct peak_usb_adapter pcan_usb_x6 = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
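
The new pcan_ufd_device_id command must keep a fixed on-wire layout. A
standalone compile-time check, modeling the kernel's __le16/__le32 with plain
stdint types (an assumption made only so it builds outside the kernel):

#include <stdint.h>

struct __attribute__((packed)) model_ufd_device_id {
	uint16_t opcode_channel;	/* models __le16 */
	uint16_t unused;
	uint32_t device_id;		/* models __le32 */
};

_Static_assert(sizeof(struct model_ufd_device_id) == 8,
	       "PCAN_UFD_CMD_DEVID_SET record must be 8 bytes on the wire");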
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 5d8f6a40bb2c..f736196383ac 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -6,10 +6,10 @@
* Copyright (C) 2003-2011 PEAK System-Technik GmbH
* Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
*/
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -76,6 +76,7 @@ static u16 pcan_usb_pro_sizeof_rec[256] = {
[PCAN_USBPRO_SETFILTR] = sizeof(struct pcan_usb_pro_filter),
[PCAN_USBPRO_SETTS] = sizeof(struct pcan_usb_pro_setts),
[PCAN_USBPRO_GETDEVID] = sizeof(struct pcan_usb_pro_devid),
+ [PCAN_USBPRO_SETDEVID] = sizeof(struct pcan_usb_pro_devid),
[PCAN_USBPRO_SETLED] = sizeof(struct pcan_usb_pro_setled),
[PCAN_USBPRO_RXMSG8] = sizeof(struct pcan_usb_pro_rxmsg),
[PCAN_USBPRO_RXMSG4] = sizeof(struct pcan_usb_pro_rxmsg) - 4,
@@ -149,6 +150,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...)
case PCAN_USBPRO_SETBTR:
case PCAN_USBPRO_GETDEVID:
+ case PCAN_USBPRO_SETDEVID:
*pc++ = va_arg(ap, int);
pc += 2;
*(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
@@ -419,8 +421,8 @@ static int pcan_usb_pro_set_led(struct peak_usb_device *dev, u8 mode,
return pcan_usb_pro_send_cmd(dev, &um);
}
-static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
- u32 *device_id)
+static int pcan_usb_pro_get_can_channel_id(struct peak_usb_device *dev,
+ u32 *can_ch_id)
{
struct pcan_usb_pro_devid *pdn;
struct pcan_usb_pro_msg um;
@@ -439,11 +441,23 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
return err;
pdn = (struct pcan_usb_pro_devid *)pc;
- *device_id = le32_to_cpu(pdn->dev_num);
+ *can_ch_id = le32_to_cpu(pdn->dev_num);
return err;
}
+static int pcan_usb_pro_set_can_channel_id(struct peak_usb_device *dev,
+ u32 can_ch_id)
+{
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETDEVID, dev->ctrl_idx,
+ can_ch_id);
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev,
struct can_bittiming *bt)
{
@@ -1023,6 +1037,9 @@ static int pcan_usb_pro_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_pro_ethtool_ops = {
.set_phys_id = pcan_usb_pro_set_phys_id,
.get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = peak_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/*
@@ -1076,7 +1093,8 @@ const struct peak_usb_adapter pcan_usb_pro = {
.dev_free = pcan_usb_pro_free,
.dev_set_bus = pcan_usb_pro_set_bus,
.dev_set_bittiming = pcan_usb_pro_set_bittiming,
- .dev_get_device_id = pcan_usb_pro_get_device_id,
+ .dev_get_can_channel_id = pcan_usb_pro_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_pro_set_can_channel_id,
.dev_decode_buf = pcan_usb_pro_decode_buf,
.dev_encode_msg = pcan_usb_pro_encode_msg,
.dev_start = pcan_usb_pro_start,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index a34e0fc021c9..28e740af905d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -62,6 +62,7 @@ struct __packed pcan_usb_pro_fwinfo {
#define PCAN_USBPRO_SETBTR 0x02
#define PCAN_USBPRO_SETBUSACT 0x04
#define PCAN_USBPRO_SETSILENT 0x05
+#define PCAN_USBPRO_SETDEVID 0x06
#define PCAN_USBPRO_SETFILTR 0x0a
#define PCAN_USBPRO_SETTS 0x10
#define PCAN_USBPRO_GETDEVID 0x12
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 0546c573668a..394ca8678d2b 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -11,7 +11,6 @@ menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
config NET_DSA_MICROCHIP_KSZ9477_I2C
tristate "KSZ series I2C connected switch driver"
depends on NET_DSA_MICROCHIP_KSZ_COMMON && I2C
- depends on PTP_1588_CLOCK_OPTIONAL
select REGMAP_I2C
help
Select to enable support for registering switches configured through I2C.
@@ -19,7 +18,6 @@ config NET_DSA_MICROCHIP_KSZ9477_I2C
config NET_DSA_MICROCHIP_KSZ_SPI
tristate "KSZ series SPI connected switch driver"
depends on NET_DSA_MICROCHIP_KSZ_COMMON && SPI
- depends on PTP_1588_CLOCK_OPTIONAL
select REGMAP_SPI
help
Select to enable support for registering switches configured through SPI.
@@ -27,6 +25,7 @@ config NET_DSA_MICROCHIP_KSZ_SPI
config NET_DSA_MICROCHIP_KSZ_PTP
bool "Support for the PTP clock on the KSZ9563/LAN937x Ethernet Switch"
depends on NET_DSA_MICROCHIP_KSZ_COMMON && PTP_1588_CLOCK
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON=m || PTP_1588_CLOCK=y
help
Select to enable support for timestamping & PTP clock manipulation in
KSZ8563/KSZ9563/LAN937x series of switches. KSZ9563/KSZ8563 supports
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 616b21c90d05..3a15015bc409 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1302,14 +1302,26 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
if (!priv->ports[port].pvid)
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
- }
- /* Set the port as a user port which is to be able to recognize VID
- * from incoming packets before fetching entry within the VLAN table.
- */
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
- VLAN_ATTR(MT7530_VLAN_USER) |
- PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ /* Set the port as a user port which is to be able to recognize
+ * VID from incoming packets before fetching entry within the
+ * VLAN table.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port),
+ VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER) |
+ PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ } else {
+ /* Also set CPU ports to the "user" VLAN port attribute, to
+ * allow VLAN classification, but keep the EG_TAG attribute as
+ * "consistent" (i.o.w. don't change its value) for packets
+ * received by the switch from the CPU, so that tagged packets
+ * are forwarded to user ports as tagged, and untagged as
+ * untagged.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER));
+ }
}
static void
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 60f1f7ada465..640725524d0c 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -8,6 +8,26 @@ config NET_DSA_MSCC_FELIX_DSA_LIB
Its name comes from the first hardware chip to make use of it
(VSC9959), code named Felix.
+config NET_DSA_MSCC_OCELOT_EXT
+ tristate "Ocelot External Ethernet switch support"
+ depends on NET_DSA && SPI
+ depends on NET_VENDOR_MICROSEMI
+ select MDIO_MSCC_MIIM
+ select MFD_OCELOT_CORE
+ select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
+ select NET_DSA_TAG_OCELOT_8021Q
+ select NET_DSA_TAG_OCELOT
+ help
+ This driver supports the VSC7511, VSC7512, VSC7513 and VSC7514 chips
+ when controlled through SPI.
+
+ The Ocelot switch family is a set of multi-port networking chips. All
+ of these chips have the ability to be controlled externally through
+ SPI or PCIe interfaces.
+
+ Say "Y" here to enable external control to these chips.
+
config NET_DSA_MSCC_FELIX
tristate "Ocelot / Felix Ethernet switch support"
depends on NET_DSA && PCI
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
index fd7dde570d4e..ead868a293e3 100644
--- a/drivers/net/dsa/ocelot/Makefile
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_MSCC_FELIX_DSA_LIB) += mscc_felix_dsa_lib.o
obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+obj-$(CONFIG_NET_DSA_MSCC_OCELOT_EXT) += mscc_ocelot_ext.o
obj-$(CONFIG_NET_DSA_MSCC_SEVILLE) += mscc_seville.o
mscc_felix_dsa_lib-objs := felix.o
mscc_felix-objs := felix_vsc9959.o
+mscc_ocelot_ext-objs := ocelot_ext.o
mscc_seville-objs := seville_vsc9953.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index f57b4095b793..d4cc9e60f369 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1075,9 +1075,12 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
- FELIX_MAC_QUIRKS);
+ felix->info->quirks);
}
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -1092,7 +1095,7 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex, tx_pause, rx_pause,
- FELIX_MAC_QUIRKS);
+ felix->info->quirks);
if (felix->info->port_sched_speed_set)
felix->info->port_sched_speed_set(ocelot, port, speed);
@@ -1270,10 +1273,15 @@ static int felix_parse_ports_node(struct felix *felix,
err = felix_validate_phy_mode(felix, port, phy_mode);
if (err < 0) {
- dev_err(dev, "Unsupported PHY mode %s on port %d\n",
- phy_modes(phy_mode), port);
+ dev_info(dev, "Unsupported PHY mode %s on port %d\n",
+ phy_modes(phy_mode), port);
of_node_put(child);
- return err;
+
+ /* Leave port_phy_modes[port] = 0, which is also
+ * PHY_INTERFACE_MODE_NA. This makes a best-effort
+ * attempt to bring up as many ports as possible.
+ */
+ continue;
}
port_phy_modes[port] = phy_mode;
@@ -1312,6 +1320,13 @@ static struct regmap *felix_request_regmap_by_name(struct felix *felix,
struct resource res;
int i;
+ /* In an MFD configuration, regmaps are registered directly to the
+ * parent device before the child devices are probed, so there is no
+ * need to initialize a new one.
+ */
+ if (!felix->info->resources)
+ return dev_get_regmap(ocelot->dev->parent, resource_name);
+
for (i = 0; i < felix->info->num_resources; i++) {
if (strcmp(resource_name, felix->info->resources[i].name))
continue;
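
The early return added above relies on the MFD convention that the parent
device already owns the regmaps. A minimal sketch of that consumer pattern,
with a hypothetical child driver (only dev_get_regmap() and the "sys" name
are taken from the code above):

#include <linux/platform_device.h>
#include <linux/regmap.h>

static int child_probe(struct platform_device *pdev)
{
	/* The MFD core registered the "sys" regmap on the shared parent,
	 * so the child looks it up by name instead of mapping a resource.
	 */
	struct regmap *sys = dev_get_regmap(pdev->dev.parent, "sys");

	if (!sys)
		return -ENODEV;
	return 0;
}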
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index be22d6ccd7c8..d5d0b30c0b75 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -7,6 +7,7 @@
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION
+#define OCELOT_PORT_MODE_NONE 0
#define OCELOT_PORT_MODE_INTERNAL BIT(0)
#define OCELOT_PORT_MODE_SGMII BIT(1)
#define OCELOT_PORT_MODE_QSGMII BIT(2)
@@ -36,6 +37,7 @@ struct felix_info {
u16 vcap_pol_base2;
u16 vcap_pol_max2;
const struct ptp_clock_info *ptp_caps;
+ unsigned long quirks;
/* Some Ocelot switches are integrated into the SoC without the
* extraction IRQ line connected to the ARM GIC. By enabling this
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 43dc8ed4854d..354aa3dbfde7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -2593,6 +2593,7 @@ static const struct felix_info felix_info_vsc9959 = {
.num_mact_rows = 2048,
.num_ports = VSC9959_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
+ .quirks = FELIX_MAC_QUIRKS,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c
new file mode 100644
index 000000000000..14efa6387bd7
--- /dev/null
+++ b/drivers/net/dsa/ocelot/ocelot_ext.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ */
+
+#include <linux/mfd/ocelot.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <soc/mscc/ocelot.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "felix.h"
+
+#define VSC7514_NUM_PORTS 11
+
+#define OCELOT_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc7512_port_modes[VSC7514_NUM_PORTS] = {
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+};
+
+static const struct ocelot_ops ocelot_ext_ops = {
+ .reset = ocelot_reset,
+ .wm_enc = ocelot_wm_enc,
+ .wm_dec = ocelot_wm_dec,
+ .wm_stat = ocelot_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+};
+
+static const char * const vsc7512_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [QS] = "qs",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
+};
+
+static const struct felix_info vsc7512_info = {
+ .resource_names = vsc7512_resource_names,
+ .regfields = vsc7514_regfields,
+ .map = vsc7514_regmap,
+ .ops = &ocelot_ext_ops,
+ .vcap = vsc7514_vcap_props,
+ .num_mact_rows = 1024,
+ .num_ports = VSC7514_NUM_PORTS,
+ .num_tx_queues = OCELOT_NUM_TC,
+ .port_modes = vsc7512_port_modes,
+};
+
+static int ocelot_ext_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ felix = kzalloc(sizeof(*felix), GFP_KERNEL);
+ if (!felix)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, felix);
+
+ ocelot = &felix->ocelot;
+ ocelot->dev = dev;
+
+ ocelot->num_flooding_pgids = 1;
+
+ felix->info = &vsc7512_info;
+
+ ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+ if (!ds) {
+ err = -ENOMEM;
+ dev_err_probe(dev, err, "Failed to allocate DSA switch\n");
+ goto err_free_felix;
+ }
+
+ ds->dev = dev;
+ ds->num_ports = felix->info->num_ports;
+ ds->num_tx_queues = felix->info->num_tx_queues;
+
+ ds->ops = &felix_switch_ops;
+ ds->priv = ocelot;
+ felix->ds = ds;
+ felix->tag_proto = DSA_TAG_PROTO_OCELOT;
+
+ err = dsa_register_switch(ds);
+ if (err) {
+ dev_err_probe(dev, err, "Failed to register DSA switch\n");
+ goto err_free_ds;
+ }
+
+ return 0;
+
+err_free_ds:
+ kfree(ds);
+err_free_felix:
+ kfree(felix);
+ return err;
+}
+
+static int ocelot_ext_remove(struct platform_device *pdev)
+{
+ struct felix *felix = dev_get_drvdata(&pdev->dev);
+
+ if (!felix)
+ return 0;
+
+ dsa_unregister_switch(felix->ds);
+
+ kfree(felix->ds);
+ kfree(felix);
+
+ return 0;
+}
+
+static void ocelot_ext_shutdown(struct platform_device *pdev)
+{
+ struct felix *felix = dev_get_drvdata(&pdev->dev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct of_device_id ocelot_ext_switch_of_match[] = {
+ { .compatible = "mscc,vsc7512-switch" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ocelot_ext_switch_of_match);
+
+static struct platform_driver ocelot_ext_switch_driver = {
+ .driver = {
+ .name = "ocelot-switch",
+ .of_match_table = of_match_ptr(ocelot_ext_switch_of_match),
+ },
+ .probe = ocelot_ext_probe,
+ .remove = ocelot_ext_remove,
+ .shutdown = ocelot_ext_shutdown,
+};
+module_platform_driver(ocelot_ext_switch_driver);
+
+MODULE_DESCRIPTION("External Ocelot Switch driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(MFD_OCELOT);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 88ed3a2e487a..287b64b788db 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -971,6 +971,7 @@ static const struct felix_info seville_info_vsc9953 = {
.vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
.vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
.vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
+ .quirks = FELIX_MAC_QUIRKS,
.num_mact_rows = 2048,
.num_ports = VSC9953_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 3fd9728f817f..3b70f6737633 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1285,6 +1285,22 @@
#define MDIO_PMA_RX_CTRL1 0x8051
#endif
+#ifndef MDIO_PMA_RX_LSTS
+#define MDIO_PMA_RX_LSTS 0x018020
+#endif
+
+#ifndef MDIO_PMA_RX_EQ_CTRL4
+#define MDIO_PMA_RX_EQ_CTRL4 0x0001805C
+#endif
+
+#ifndef MDIO_PMA_MP_MISC_STS
+#define MDIO_PMA_MP_MISC_STS 0x0078
+#endif
+
+#ifndef MDIO_PMA_PHY_RX_EQ_CEU
+#define MDIO_PMA_PHY_RX_EQ_CEU 0x1800E
+#endif
+
#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
@@ -1395,6 +1411,28 @@
#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+#define XGBE_PMA_RX_SIG_DET_0_MASK BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_ENABLE BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_DISABLE 0x0000
+
+#define XGBE_PMA_RX_VALID_0_MASK BIT(12)
+#define XGBE_PMA_RX_VALID_0_ENABLE BIT(12)
+#define XGBE_PMA_RX_VALID_0_DISABLE 0x0000
+
+#define XGBE_PMA_RX_AD_REQ_MASK BIT(12)
+#define XGBE_PMA_RX_AD_REQ_ENABLE BIT(12)
+#define XGBE_PMA_RX_AD_REQ_DISABLE 0x0000
+
+#define XGBE_PMA_RX_ADPT_ACK_MASK BIT(12)
+#define XGBE_PMA_RX_ADPT_ACK BIT(12)
+
+#define XGBE_PMA_CFF_UPDTM1_VLD BIT(8)
+#define XGBE_PMA_CFF_UPDT0_VLD BIT(9)
+#define XGBE_PMA_CFF_UPDT1_VLD BIT(10)
+#define XGBE_PMA_CFF_UPDT_MASK (XGBE_PMA_CFF_UPDTM1_VLD |\
+ XGBE_PMA_CFF_UPDT0_VLD | \
+ XGBE_PMA_CFF_UPDT1_VLD)
+
#define XGBE_PMA_PLL_CTRL_MASK BIT(15)
#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15)
#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index f4683d53e58c..16e7fb2c0dae 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -388,6 +388,10 @@ struct xgbe_phy_data {
static DEFINE_MUTEX(xgbe_phy_comm_lock);
static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
+static void xgbe_phy_rrc(struct xgbe_prv_data *pdata);
+static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ enum xgbe_mb_cmd cmd,
+ enum xgbe_mb_subcmd sub_cmd);
static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
struct xgbe_i2c_op *i2c_op)
@@ -1882,6 +1886,9 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
if (phy_data->phydev &&
(phy_data->phydev->speed == SPEED_10000))
XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_2500))
+ XGBE_SET_ADV(dlks, 2500baseX_Full);
else
XGBE_SET_ADV(dlks, 1000baseKX_Full);
break;
@@ -2035,6 +2042,93 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
xgbe_phy_put_comm_ownership(pdata);
}
+#define MAX_RX_ADAPT_RETRIES 1
+#define XGBE_PMA_RX_VAL_SIG_MASK (XGBE_PMA_RX_SIG_DET_0_MASK | \
+ XGBE_PMA_RX_VALID_0_MASK)
+
+static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+ pdata->rx_adapt_retries = 0;
+ return;
+ }
+
+ xgbe_phy_perform_ratechange(pdata,
+ mode == XGBE_MODE_KR ?
+ XGBE_MB_CMD_SET_10G_KR :
+ XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_RX_ADAP);
+}
+
+static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Step 2: Force PCS to send RX_ADAPT Req to PHY */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+ XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_ENABLE);
+
+ /* Step 3: Wait for RX_ADAPT ACK from the PHY */
+ msleep(200);
+
+ /* Software polls for coefficient update command (given by local PHY) */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_PHY_RX_EQ_CEU);
+
+ /* Clear the RX_AD_REQ bit */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+ XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_DISABLE);
+
+ /* Check if coefficient update command is set */
+ if ((reg & XGBE_PMA_CFF_UPDT_MASK) != XGBE_PMA_CFF_UPDT_MASK)
+ goto set_mode;
+
+ /* Step 4: Check for Block lock */
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS) {
+ /* If the block lock is found, update the helpers
+ * and declare the link up
+ */
+ netif_dbg(pdata, link, pdata->netdev, "Block_lock done");
+ pdata->rx_adapt_done = true;
+ pdata->mode_set = false;
+ return;
+ }
+
+set_mode:
+ xgbe_set_rx_adap_mode(pdata, phy_data->cur_mode);
+}
+
+static void xgbe_phy_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+rx_adapt_reinit:
+ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_LSTS,
+ XGBE_PMA_RX_VAL_SIG_MASK);
+
+ /* Step 1: Check for RX_VALID && LF_SIGDET */
+ if ((reg & XGBE_PMA_RX_VAL_SIG_MASK) != XGBE_PMA_RX_VAL_SIG_MASK) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "RX_VALID or LF_SIGDET is unset, issue rrc");
+ xgbe_phy_rrc(pdata);
+ if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+ pdata->rx_adapt_retries = 0;
+ return;
+ }
+ goto rx_adapt_reinit;
+ }
+
+ /* perform rx adaptation */
+ xgbe_rx_adaptation(pdata);
+}
+
static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
{
int reg;
@@ -2100,7 +2194,7 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
- goto reenable_pll;
+ goto do_rx_adaptation;
usleep_range(1000, 2000);
}
@@ -2110,6 +2204,20 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
/* Reset on error */
xgbe_phy_rx_reset(pdata);
+ goto reenable_pll;
+
+do_rx_adaptation:
+ if (pdata->en_rx_adap && sub_cmd == XGBE_MB_SUBCMD_RX_ADAP &&
+ (cmd == XGBE_MB_CMD_SET_10G_KR || cmd == XGBE_MB_CMD_SET_10G_SFI)) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "Enabling RX adaptation\n");
+ pdata->mode_set = true;
+ xgbe_phy_rx_adaptation(pdata);
+ /* return from here to avoid enabling PLL ctrl
+ * during adaptation phase
+ */
+ return;
+ }
reenable_pll:
/* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */
@@ -2138,6 +2246,31 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "phy powered off\n");
}
+static bool enable_rx_adap(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
+ /* Rx-Adaptation is not supported on older platforms (ver < 0x30) */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ if (ver < 0x30)
+ return false;
+
+ /* Re-driver models 4223 && 4227 do not support Rx-Adaptation */
+ if (phy_data->redrv &&
+ (phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4223 ||
+ phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4227))
+ return false;
+
+ /* 10G KR mode with AN does not support Rx-Adaptation */
+ if (mode == XGBE_MODE_KR &&
+ phy_data->port_mode != XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG)
+ return false;
+
+ pdata->en_rx_adap = 1;
+ return true;
+}
+
static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2146,7 +2279,12 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
/* 10G/SFI */
if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
+ pdata->en_rx_adap = 0;
xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE);
+ } else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
+ (enable_rx_adap(pdata, XGBE_MODE_SFI))) {
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_RX_ADAP);
} else {
if (phy_data->sfp_cable_len <= 1)
xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
@@ -2227,7 +2365,12 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 10G/KR */
- xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_NONE);
+ if (enable_rx_adap(pdata, XGBE_MODE_KR))
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_SUBCMD_RX_ADAP);
+ else
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_KR;
@@ -2282,9 +2425,11 @@ static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata)
case XGBE_MODE_SGMII_100:
case XGBE_MODE_SGMII_1000:
return XGBE_MODE_KR;
+ case XGBE_MODE_KX_2500:
+ return XGBE_MODE_SGMII_1000;
case XGBE_MODE_KR:
default:
- return XGBE_MODE_SGMII_1000;
+ return XGBE_MODE_KX_2500;
}
}
@@ -2644,7 +2789,8 @@ static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_prv_data *pdata,
case SPEED_1000:
return true;
case SPEED_2500:
- return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T);
+ return ((phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T) ||
+ (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T));
case SPEED_10000:
return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
default:
@@ -2737,8 +2883,11 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
- if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) {
+ if (pdata->en_rx_adap)
+ pdata->rx_adapt_done = false;
return 0;
+ }
}
if (phy_data->phydev) {
@@ -2760,7 +2909,29 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
*/
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- if (reg & MDIO_STAT1_LSTATUS)
+
+ if (pdata->en_rx_adap) {
+ /* if the link is available and adaptation is done,
+ * declare link up
+ */
+ if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ return 1;
+ /* If either link is not available or adaptation is not done,
+ * retrigger the adaptation logic. (if the mode is not set,
+ * then issue mailbox command first)
+ */
+ if (pdata->mode_set) {
+ xgbe_phy_rx_adaptation(pdata);
+ } else {
+ pdata->rx_adapt_done = false;
+ xgbe_phy_set_mode(pdata, phy_data->cur_mode);
+ }
+
+ /* check again for the link and adaptation status */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ return 1;
+ } else if (reg & MDIO_STAT1_LSTATUS)
return 1;
if (pdata->phy.autoneg == AUTONEG_ENABLE &&
@@ -3024,6 +3195,7 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
break;
@@ -3474,6 +3646,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, 1000baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_1000;
}
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
+ XGBE_SET_SUP(lks, 2500baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
XGBE_SET_SUP(lks, 10000baseT_Full);
phy_data->start_mode = XGBE_MODE_KR;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 16e73df3e9b9..ad136ed493ed 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -625,6 +625,7 @@ enum xgbe_mb_cmd {
enum xgbe_mb_subcmd {
XGBE_MB_SUBCMD_NONE = 0,
+ XGBE_MB_SUBCMD_RX_ADAP,
/* 10GbE SFP subcommands */
XGBE_MB_SUBCMD_ACTIVE = 0,
@@ -1316,6 +1317,10 @@ struct xgbe_prv_data {
bool debugfs_an_cdr_workaround;
bool debugfs_an_cdr_track_early;
+ bool en_rx_adap;
+ int rx_adapt_retries;
+ bool rx_adapt_done;
+ bool mode_set;
};
/* Function prototypes*/
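
The en_rx_adap/rx_adapt_done/mode_set flags added above drive a retry loop
spread across xgbe_phy_rx_adaptation(), xgbe_rx_adaptation() and
xgbe_phy_link_status(). A user-space model of just that control flow, with
the hardware polls stubbed out (the stub and the model struct are assumptions
for illustration, not driver code):

#include <stdbool.h>

#define MAX_RX_ADAPT_RETRIES	1

struct rx_adapt_model {
	bool rx_adapt_done;
	bool mode_set;
	int rx_adapt_retries;
};

static bool block_lock_found(void)
{
	return false;			/* stub for the MDIO_STAT1 poll */
}

static void rx_adaptation(struct rx_adapt_model *m)
{
	if (block_lock_found()) {
		m->rx_adapt_done = true;	/* link may be declared up */
		m->mode_set = false;
		return;
	}
	if (m->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
		m->rx_adapt_retries = 0;	/* give up until the next link poll */
		return;
	}
	/* otherwise: re-issue the 10G_KR/10G_SFI mailbox command with the
	 * RX_ADAP subcommand and try again
	 */
	rx_adaptation(m);
}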
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 77609dc0a08d..0b2a52199914 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -21,6 +21,7 @@
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <linux/filter.h>
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f4ca0c6c0f51..948586bf1b5b 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -213,6 +213,7 @@ config BNXT
select NET_DEVLINK
select PAGE_POOL
select DIMLIB
+ select AUXILIARY_BUS
help
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index b751dc8486dc..392ec09a1d8a 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -196,28 +196,6 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg,
return 0;
}
-static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
-{
- u32 val;
-
- bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
- (index << CAM_CTRL_INDEX_SHIFT)));
-
- b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
-
- val = br32(bp, B44_CAM_DATA_LO);
-
- data[2] = (val >> 24) & 0xFF;
- data[3] = (val >> 16) & 0xFF;
- data[4] = (val >> 8) & 0xFF;
- data[5] = (val >> 0) & 0xFF;
-
- val = br32(bp, B44_CAM_DATA_HI);
-
- data[0] = (val >> 8) & 0xFF;
- data[1] = (val >> 0) & 0xFF;
-}
-
static inline void __b44_cam_write(struct b44 *bp,
const unsigned char *data, int index)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a1b4356dfb6c..ab5dfb3a4081 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2414,7 +2414,6 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
bnxt_queue_sp_work(bp);
async_event_process_exit:
- bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -5538,7 +5537,7 @@ vnic_mru:
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
- if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ if (!vnic_id && bnxt_ulp_registered(bp->edev))
req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req);
@@ -13181,6 +13180,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
+ bnxt_rdma_aux_device_uninit(bp);
+
bnxt_ptp_clear(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
@@ -13779,11 +13780,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_dl_fw_reporters_create(bp);
+ bnxt_rdma_aux_device_init(bp);
+
bnxt_print_device_info(bp);
pci_save_state(pdev);
- return 0;
+ return 0;
init_err_cleanup:
bnxt_dl_unregister(bp);
init_err_dl:
@@ -13827,7 +13830,6 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
dev_close(dev);
- bnxt_ulp_shutdown(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5163ef4a49ea..dcb09fbe4007 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
#include <linux/crash_dump.h>
+#include <linux/auxiliary_bus.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/xdp.h>
@@ -1631,6 +1632,12 @@ struct bnxt_fw_health {
#define BNXT_FW_IF_RETRY 10
#define BNXT_FW_SLOT_RESET_RETRY 4
+struct bnxt_aux_priv {
+ struct auxiliary_device aux_dev;
+ struct bnxt_en_dev *edev;
+ int id;
+};
+
enum board_idx {
BCM57301,
BCM57302,
@@ -1852,6 +1859,7 @@ struct bnxt {
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+ struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
struct bnxt_napi **bnapi;
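
bnxt_aux_priv above is the bnxt_en side of the auxiliary bus conversion: the
RDMA driver no longer registers through a private ULP table but binds to an
auxiliary device. A hedged sketch of that consumer side; the device name
"bnxt_en.rdma" (modname.devname) is an assumption for illustration:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int ulp_probe(struct auxiliary_device *adev,
		     const struct auxiliary_device_id *id)
{
	/* container_of(adev, struct bnxt_aux_priv, aux_dev) would recover
	 * the bnxt-side private data, including the bnxt_en_dev handle.
	 */
	return 0;
}

static void ulp_remove(struct auxiliary_device *adev)
{
}

static const struct auxiliary_device_id ulp_id_table[] = {
	{ .name = "bnxt_en.rdma" },	/* assumption: modname.devname */
	{}
};
MODULE_DEVICE_TABLE(auxiliary, ulp_id_table);

static struct auxiliary_driver ulp_driver = {
	.probe = ulp_probe,
	.remove = ulp_remove,
	.id_table = ulp_id_table,
};
module_auxiliary_driver(ulp_driver);

MODULE_LICENSE("GPL");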
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 26913dc816d3..8b3e7697390f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -1303,7 +1303,6 @@ int bnxt_dl_register(struct bnxt *bp)
if (rc)
goto err_dl_port_unreg;
- devlink_set_features(dl, DEVLINK_F_RELOAD);
out:
devlink_register(dl);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index a4cba7cb2783..3ed3a2b3b3a9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -749,7 +749,6 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
*num_vfs = rc;
}
- bnxt_ulp_sriov_cfg(bp, *num_vfs);
return 0;
}
@@ -823,10 +822,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out2;
rc = pci_enable_sriov(bp->pdev, *num_vfs);
- if (rc) {
- bnxt_ulp_sriov_cfg(bp, 0);
+ if (rc)
goto err_out2;
- }
return 0;
@@ -872,8 +869,6 @@ void bnxt_sriov_disable(struct bnxt *bp)
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
rtnl_unlock();
-
- bnxt_ulp_sriov_cfg(bp, 0);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 2e54bf4fc7a7..d4cc9c371e7b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -19,89 +19,26 @@
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>
+#include <linux/auxiliary_bus.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
-static int bnxt_register_dev(struct bnxt_en_dev *edev, unsigned int ulp_id,
- struct bnxt_ulp_ops *ulp_ops, void *handle)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_ulp *ulp;
-
- ASSERT_RTNL();
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
- if (rcu_access_pointer(ulp->ulp_ops)) {
- netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
- return -EBUSY;
- }
- if (ulp_id == BNXT_ROCE_ULP) {
- unsigned int max_stat_ctxs;
-
- max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
- if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->cp_nr_rings == max_stat_ctxs)
- return -ENOMEM;
- }
-
- atomic_set(&ulp->ref_count, 0);
- ulp->handle = handle;
- rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
-
- if (ulp_id == BNXT_ROCE_ULP) {
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, 0);
- }
-
- return 0;
-}
-
-static int bnxt_unregister_dev(struct bnxt_en_dev *edev, unsigned int ulp_id)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_ulp *ulp;
- int i = 0;
-
- ASSERT_RTNL();
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
- if (!rcu_access_pointer(ulp->ulp_ops)) {
- netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
- return -EINVAL;
- }
- if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
- edev->en_ops->bnxt_free_msix(edev, ulp_id);
-
- if (ulp->max_async_event_id)
- bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
-
- RCU_INIT_POINTER(ulp->ulp_ops, NULL);
- synchronize_rcu();
- ulp->max_async_event_id = 0;
- ulp->async_events_bmap = NULL;
- while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
- msleep(100);
- i++;
- }
- return 0;
-}
+static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
int num_msix, idx, i;
- num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
- idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ if (!edev->ulp_tbl->msix_requested) {
+ netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
+ return;
+ }
+ num_msix = edev->ulp_tbl->msix_requested;
+ idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
@@ -115,125 +52,95 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
}
}
-static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id,
- struct bnxt_msix_entry *ent, int num_msix)
+int bnxt_register_dev(struct bnxt_en_dev *edev,
+ struct bnxt_ulp_ops *ulp_ops,
+ void *handle)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
- struct bnxt_hw_resc *hw_resc;
- int max_idx, max_cp_rings;
- int avail_msix, idx;
- int total_vecs;
- int rc = 0;
-
- ASSERT_RTNL();
- if (ulp_id != BNXT_ROCE_ULP)
- return -EINVAL;
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- return -ENODEV;
+ unsigned int max_stat_ctxs;
+ struct bnxt_ulp *ulp;
- if (edev->ulp_tbl[ulp_id].msix_requested)
- return -EAGAIN;
+ max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+ if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
+ bp->cp_nr_rings == max_stat_ctxs)
+ return -ENOMEM;
- max_cp_rings = bnxt_get_max_func_cp_rings(bp);
- avail_msix = bnxt_get_avail_msix(bp, num_msix);
- if (!avail_msix)
+ ulp = edev->ulp_tbl;
+ if (!ulp)
return -ENOMEM;
- if (avail_msix > num_msix)
- avail_msix = num_msix;
-
- if (BNXT_NEW_RM(bp)) {
- idx = bp->cp_nr_rings;
- } else {
- max_idx = min_t(int, bp->total_irqs, max_cp_rings);
- idx = max_idx - avail_msix;
- }
- edev->ulp_tbl[ulp_id].msix_base = idx;
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- hw_resc = &bp->hw_resc;
- total_vecs = idx + avail_msix;
- if (bp->total_irqs < total_vecs ||
- (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
- if (netif_running(dev)) {
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- } else {
- rc = bnxt_reserve_rings(bp, true);
- }
- }
- if (rc) {
- edev->ulp_tbl[ulp_id].msix_requested = 0;
- return -EAGAIN;
- }
- if (BNXT_NEW_RM(bp)) {
- int resv_msix;
+ ulp->handle = handle;
+ rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
+
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_hwrm_vnic_cfg(bp, 0);
- resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
- avail_msix = min_t(int, resv_msix, avail_msix);
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- }
- bnxt_fill_msix_vecs(bp, ent);
+ bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return avail_msix;
+ return 0;
}
+EXPORT_SYMBOL(bnxt_register_dev);
-static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id)
+void bnxt_unregister_dev(struct bnxt_en_dev *edev)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
+ int i = 0;
- ASSERT_RTNL();
- if (ulp_id != BNXT_ROCE_ULP)
- return -EINVAL;
+ ulp = edev->ulp_tbl;
+ if (ulp->msix_requested)
+ edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
- return 0;
+ if (ulp->max_async_event_id)
+ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
- edev->ulp_tbl[ulp_id].msix_requested = 0;
- edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
- bnxt_close_nic(bp, true, false);
- bnxt_open_nic(bp, true, false);
+ RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+ synchronize_rcu();
+ ulp->max_async_event_id = 0;
+ ulp->async_events_bmap = NULL;
+ while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
+ msleep(100);
+ i++;
}
- return 0;
+ return;
}
+EXPORT_SYMBOL(bnxt_unregister_dev);
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_en_dev *edev = bp->edev;
+ u32 roce_msix = BNXT_VF(bp) ?
+ BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
- return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
- }
- return 0;
+ return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
+ min_t(u32, roce_msix, num_online_cpus()) : 0);
}
int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_en_dev *edev = bp->edev;
- if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
- return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ if (edev->ulp_tbl->msix_requested)
+ return edev->ulp_tbl->msix_base;
}
return 0;
}
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_en_dev *edev = bp->edev;
- if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ if (edev->ulp_tbl->msix_requested)
return BNXT_MIN_ROCE_STAT_CTXS;
}
return 0;
}
-static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
+int bnxt_send_msg(struct bnxt_en_dev *edev,
struct bnxt_fw_msg *fw_msg)
{
struct net_device *dev = edev->net;
@@ -243,7 +150,7 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
u32 resp_len;
int rc;
- if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
+ if (bp->fw_reset_state)
return -EBUSY;
rc = hwrm_req_init(bp, req, 0 /* don't care */);
@@ -267,42 +174,36 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
hwrm_req_drop(bp, req);
return rc;
}
-
-static void bnxt_ulp_get(struct bnxt_ulp *ulp)
-{
- atomic_inc(&ulp->ref_count);
-}
-
-static void bnxt_ulp_put(struct bnxt_ulp *ulp)
-{
- atomic_dec(&ulp->ref_count);
-}
+EXPORT_SYMBOL(bnxt_send_msg);
void bnxt_ulp_stop(struct bnxt *bp)
{
+ struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
if (!edev)
return;
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+ if (aux_priv) {
+ struct auxiliary_device *adev;
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_stop)
- continue;
- ops->ulp_stop(ulp->handle);
+ adev = &aux_priv->aux_dev;
+ if (adev->dev.driver) {
+ struct auxiliary_driver *adrv;
+ pm_message_t pm = {};
+
+ adrv = to_auxiliary_drv(adev->dev.driver);
+ edev->en_state = bp->state;
+ adrv->suspend(adev, pm);
+ }
}
}
void bnxt_ulp_start(struct bnxt *bp, int err)
{
+ struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
if (!edev)
return;
@@ -312,58 +213,19 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_start)
- continue;
- ops->ulp_start(ulp->handle);
- }
-}
-
-void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
-{
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
+ if (aux_priv) {
+ struct auxiliary_device *adev;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+ adev = &aux_priv->aux_dev;
+ if (adev->dev.driver) {
+ struct auxiliary_driver *adrv;
- rcu_read_lock();
- ops = rcu_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_sriov_config) {
- rcu_read_unlock();
- continue;
+ adrv = to_auxiliary_drv(adev->dev.driver);
+ edev->en_state = bp->state;
+ adrv->resume(adev);
}
- bnxt_ulp_get(ulp);
- rcu_read_unlock();
- ops->ulp_sriov_config(ulp->handle, num_vfs);
- bnxt_ulp_put(ulp);
}
-}
-void bnxt_ulp_shutdown(struct bnxt *bp)
-{
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
-
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_shutdown)
- continue;
- ops->ulp_shutdown(ulp->handle);
- }
}
void bnxt_ulp_irq_stop(struct bnxt *bp)
@@ -374,8 +236,8 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ if (bnxt_ulp_registered(bp->edev)) {
+ struct bnxt_ulp *ulp = edev->ulp_tbl;
if (!ulp->msix_requested)
return;
@@ -395,8 +257,8 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ if (bnxt_ulp_registered(bp->edev)) {
+ struct bnxt_ulp *ulp = edev->ulp_tbl;
struct bnxt_msix_entry *ent = NULL;
if (!ulp->msix_requested)
@@ -418,46 +280,15 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
-{
- u16 event_id = le16_to_cpu(cmpl->event_id);
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
-
- rcu_read_lock();
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rcu_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_async_notifier)
- continue;
- if (!ulp->async_events_bmap ||
- event_id > ulp->max_async_event_id)
- continue;
-
- /* Read max_async_event_id first before testing the bitmap. */
- smp_rmb();
- if (test_bit(event_id, ulp->async_events_bmap))
- ops->ulp_async_notifier(ulp->handle, cmpl);
- }
- rcu_read_unlock();
-}
-
-static int bnxt_register_async_events(struct bnxt_en_dev *edev, unsigned int ulp_id,
- unsigned long *events_bmap, u16 max_id)
+int bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap,
+ u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
+ ulp = edev->ulp_tbl;
ulp->async_events_bmap = events_bmap;
/* Make sure bnxt_ulp_async_events() sees this order */
smp_wmb();
@@ -465,38 +296,121 @@ static int bnxt_register_async_events(struct bnxt_en_dev *edev, unsigned int ulp
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
return 0;
}
+EXPORT_SYMBOL(bnxt_register_async_events);
-static const struct bnxt_en_ops bnxt_en_ops_tbl = {
- .bnxt_register_device = bnxt_register_dev,
- .bnxt_unregister_device = bnxt_unregister_dev,
- .bnxt_request_msix = bnxt_req_msix_vecs,
- .bnxt_free_msix = bnxt_free_msix_vecs,
- .bnxt_send_fw_msg = bnxt_send_msg,
- .bnxt_register_fw_async_events = bnxt_register_async_events,
-};
+void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
+{
+ struct bnxt_aux_priv *aux_priv;
+ struct auxiliary_device *adev;
+
+ /* Skip if no auxiliary device init was done. */
+ if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ return;
+
+ aux_priv = bp->aux_priv;
+ adev = &aux_priv->aux_dev;
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
-struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
+static void bnxt_aux_dev_release(struct device *dev)
{
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_en_dev *edev;
+ struct bnxt_aux_priv *aux_priv =
+ container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
+
+ ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv->edev->ulp_tbl);
+ kfree(aux_priv->edev);
+ kfree(aux_priv);
+}
+
+static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
+{
+ edev->net = bp->dev;
+ edev->pdev = bp->pdev;
+ edev->l2_db_size = bp->db_size;
+ edev->l2_db_size_nc = bp->db_size;
- edev = bp->edev;
- if (!edev) {
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return ERR_PTR(-ENOMEM);
- edev->en_ops = &bnxt_en_ops_tbl;
- edev->net = dev;
- edev->pdev = bp->pdev;
- edev->l2_db_size = bp->db_size;
- edev->l2_db_size_nc = bp->db_size;
- bp->edev = edev;
- }
- edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
- return bp->edev;
+ if (bp->flags & BNXT_FLAG_VF)
+ edev->flags |= BNXT_EN_FLAG_VF;
+
+ edev->chip_num = bp->chip_num;
+ edev->hw_ring_stats_size = bp->hw_ring_stats_size;
+ edev->pf_port_id = bp->pf.port_id;
+ edev->en_state = bp->state;
+
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
+}
+
+void bnxt_rdma_aux_device_init(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ struct bnxt_aux_priv *aux_priv;
+ struct bnxt_en_dev *edev;
+ struct bnxt_ulp *ulp;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ return;
+
+ bp->aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
+ if (!bp->aux_priv)
+ goto exit;
+
+ bp->aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
+ if (bp->aux_priv->id < 0) {
+ netdev_warn(bp->dev,
+ "ida alloc failed for ROCE auxiliary device\n");
+ kfree(bp->aux_priv);
+ goto exit;
+ }
+
+ aux_priv = bp->aux_priv;
+ aux_dev = &aux_priv->aux_dev;
+ aux_dev->id = aux_priv->id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &bp->pdev->dev;
+ aux_dev->dev.release = bnxt_aux_dev_release;
+
+ rc = auxiliary_device_init(aux_dev);
+ if (rc) {
+ ida_free(&bnxt_aux_dev_ids, bp->aux_priv->id);
+ kfree(bp->aux_priv);
+ goto exit;
+ }
+
+ /* From this point, all cleanup will happen via the .release callback &
+ * any error unwinding will need to include a call to
+ * auxiliary_device_uninit.
+ */
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ goto aux_dev_uninit;
+
+ ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+ if (!ulp)
+ goto aux_dev_uninit;
+
+ edev->ulp_tbl = ulp;
+ aux_priv->edev = edev;
+ bp->edev = edev;
+ bnxt_set_edev_info(edev, bp);
+
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ netdev_warn(bp->dev,
+ "Failed to add auxiliary device for ROCE\n");
+ goto aux_dev_uninit;
+ }
+
+ return;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(aux_dev);
+exit:
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}
-EXPORT_SYMBOL(bnxt_ulp_probe);
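Note: after the auxiliary-bus conversion above, bnxt_get_ulp_msix_num() no longer reports what a ULP requested at runtime; it derives a fixed RoCE MSI-X budget from the PF/VF cap and the online CPU count. A minimal userspace sketch of that calculation (function and variable names here are illustrative, not the driver's):

#include <stdio.h>

#define MAX_ROCE_MSIX		9	/* mirrors BNXT_MAX_ROCE_MSIX */
#define MAX_VF_ROCE_MSIX	2	/* mirrors BNXT_MAX_VF_ROCE_MSIX */

/* Hypothetical stand-in for the BNXT_VF() / BNXT_FLAG_ROCE_CAP tests. */
static unsigned int roce_msix_budget(int is_vf, int has_roce_cap,
				     unsigned int online_cpus)
{
	unsigned int cap = is_vf ? MAX_VF_ROCE_MSIX : MAX_ROCE_MSIX;

	if (!has_roce_cap)
		return 0;
	return cap < online_cpus ? cap : online_cpus;	/* the min_t() */
}

int main(void)
{
	/* A PF with 16 CPUs is capped at 9 vectors; a VF at 2. */
	printf("%u %u\n", roce_msix_budget(0, 1, 16),
	       roce_msix_budget(1, 1, 16));
	return 0;
}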
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 42b50abc3e91..80cbc4b6130a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -15,6 +15,8 @@
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
+#define BNXT_MAX_ROCE_MSIX 9
+#define BNXT_MAX_VF_ROCE_MSIX 2
struct hwrm_async_event_cmpl;
struct bnxt;
@@ -26,12 +28,6 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- /* async_notifier() cannot sleep (in BH context) */
- void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
- void (*ulp_stop)(void *);
- void (*ulp_start)(void *);
- void (*ulp_sriov_config)(void *, int);
- void (*ulp_shutdown)(void *);
void (*ulp_irq_stop)(void *);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -57,6 +53,7 @@ struct bnxt_ulp {
struct bnxt_en_dev {
struct net_device *net;
struct pci_dev *pdev;
+ struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX];
u32 flags;
#define BNXT_EN_FLAG_ROCEV1_CAP 0x1
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
@@ -64,8 +61,10 @@ struct bnxt_en_dev {
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
- const struct bnxt_en_ops *en_ops;
- struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ #define BNXT_EN_FLAG_VF 0x10
+#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
+
+ struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
* bytes mapped by L2
* driver.
@@ -74,24 +73,19 @@ struct bnxt_en_dev {
* bytes mapped as non-
* cacheable.
*/
+ u16 chip_num;
+ u16 hw_ring_stats_size;
+ u16 pf_port_id;
+ unsigned long en_state; /* Could be checked in
+ * RoCE driver suspend
+ * mode only. Will be
+ * updated in resume.
+ */
};
-struct bnxt_en_ops {
- int (*bnxt_register_device)(struct bnxt_en_dev *, unsigned int,
- struct bnxt_ulp_ops *, void *);
- int (*bnxt_unregister_device)(struct bnxt_en_dev *, unsigned int);
- int (*bnxt_request_msix)(struct bnxt_en_dev *, unsigned int,
- struct bnxt_msix_entry *, int);
- int (*bnxt_free_msix)(struct bnxt_en_dev *, unsigned int);
- int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, unsigned int,
- struct bnxt_fw_msg *);
- int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, unsigned int,
- unsigned long *, u16);
-};
-
-static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
- if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
+ if (edev && edev->ulp_tbl)
return true;
return false;
}
@@ -102,10 +96,15 @@ int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
-void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
-struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
-
+void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
+void bnxt_rdma_aux_device_init(struct bnxt *bp);
+int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
+ void *handle);
+void bnxt_unregister_dev(struct bnxt_en_dev *edev);
+int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
+int bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
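With only the RoCE ULP left, the header drops the per-ulp_id array and the bnxt_en_ops table: edev->ulp_tbl is now a pointer to a single allocated entry, and "registered" reduces to a pointer test, as bnxt_ulp_registered() above shows. A standalone sketch of the simplified shape (struct names are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

/* One entry instead of ulp_tbl[BNXT_MAX_ULP]; illustrative types only. */
struct ulp_entry { int msix_requested; int msix_base; };
struct en_dev    { struct ulp_entry *ulp_tbl; };

static int ulp_registered(const struct en_dev *edev)
{
	return edev && edev->ulp_tbl;	/* mirrors bnxt_ulp_registered() */
}

int main(void)
{
	struct en_dev edev = { .ulp_tbl = calloc(1, sizeof(struct ulp_entry)) };

	printf("registered: %d\n", ulp_registered(&edev));
	free(edev.ulp_tbl);
	return 0;
}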
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 21973046b12b..d937daa8ee88 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2316,6 +2316,14 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
__func__, p_index, ring->c_index,
ring->read_ptr, dma_length_status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, dev, "oversized packet\n");
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 41964fd02452..6e141a8bbf43 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4684,25 +4684,26 @@ static int init_reset_optional(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to init SGMII PHY\n");
- }
- ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
- if (!ret) {
- u32 pm_info[2];
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
- ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
- pm_info, ARRAY_SIZE(pm_info));
- if (ret) {
- dev_err(&pdev->dev, "Failed to read power management information\n");
- goto err_out_phy_exit;
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
}
- ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
- if (ret)
- goto err_out_phy_exit;
- ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
- if (ret)
- goto err_out_phy_exit;
}
/* Fully reset controller at hardware level if mapped in device tree */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index be96f1dc0372..d4a862a9fd7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -4,7 +4,7 @@
#ifndef __CXGB4_TC_MQPRIO_H__
#define __CXGB4_TC_MQPRIO_H__
-#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128
diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c
index c4c6e1357317..d083e6684f12 100644
--- a/drivers/net/ethernet/engleder/tsnep_tc.c
+++ b/drivers/net/ethernet/engleder/tsnep_tc.c
@@ -403,12 +403,33 @@ static int tsnep_taprio(struct tsnep_adapter *adapter,
return 0;
}
+static int tsnep_tc_query_caps(struct tsnep_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return tsnep_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_TAPRIO:
return tsnep_taprio(adapter, type_data);
default:
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 159ae740ba3c..2fc712b24d12 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -28,11 +28,9 @@ EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
- int i;
- for (i = 0; i < priv->num_rx_rings; i++)
- if (priv->rx_ring[i]->xdp.prog)
- return num_tx_rings - num_possible_cpus();
+ if (priv->xdp_prog)
+ return num_tx_rings - num_possible_cpus();
return num_tx_rings;
}
@@ -2456,7 +2454,6 @@ int enetc_open(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr_resource *tx_res, *rx_res;
- int num_stack_tx_queues;
bool extended;
int err;
@@ -2482,16 +2479,6 @@ int enetc_open(struct net_device *ndev)
goto err_alloc_rx;
}
- num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
-
- err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
- if (err)
- goto err_set_queues;
-
enetc_tx_onestep_tstamp_init(priv);
enetc_assign_tx_resources(priv, tx_res);
enetc_assign_rx_resources(priv, rx_res);
@@ -2500,8 +2487,6 @@ int enetc_open(struct net_device *ndev)
return 0;
-err_set_queues:
- enetc_free_rx_resources(rx_res, priv->num_rx_rings);
err_alloc_rx:
enetc_free_tx_resources(tx_res, priv->num_tx_rings);
err_alloc_tx:
@@ -2576,8 +2561,11 @@ static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
* without reconfiguration.
*/
if (!netif_running(priv->ndev)) {
- if (cb)
- cb(priv, ctx);
+ if (cb) {
+ err = cb(priv, ctx);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -2598,8 +2586,11 @@ static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
enetc_free_rxtx_rings(priv);
/* Interface is down, run optional callback now */
- if (cb)
- cb(priv, ctx);
+ if (cb) {
+ err = cb(priv, ctx);
+ if (err)
+ goto out_restart;
+ }
enetc_assign_tx_resources(priv, tx_res);
enetc_assign_rx_resources(priv, rx_res);
@@ -2608,76 +2599,123 @@ static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
return 0;
+out_restart:
+ enetc_setup_bdrs(priv, extended);
+ enetc_start(priv->ndev);
+ enetc_free_rx_resources(rx_res, priv->num_rx_rings);
out_free_tx_res:
enetc_free_tx_resources(tx_res, priv->num_tx_rings);
out:
return err;
}
-int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i,
+ priv->tx_ring[i]->prio);
+}
+
+static void enetc_reset_tc_mqprio(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct tc_mqprio_qopt *mqprio = type_data;
struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
int num_stack_tx_queues;
- u8 num_tc;
int i;
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
- mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = mqprio->num_tc;
- if (!num_tc) {
- netdev_reset_tc(ndev);
- netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ priv->min_num_stack_tx_queues = num_possible_cpus();
- /* Reset all ring priorities to 0 */
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = 0;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
- }
+ /* Reset all ring priorities to 0 */
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+
+ enetc_debug_tx_ring_prios(priv);
+}
+
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
+ int num_stack_tx_queues = 0;
+ u8 num_tc = mqprio->num_tc;
+ struct enetc_bdr *tx_ring;
+ int offset, count;
+ int err, tc, q;
+ if (!num_tc) {
+ enetc_reset_tc_mqprio(ndev);
return 0;
}
- /* Check if we have enough BD rings available to accommodate all TCs */
- if (num_tc > num_stack_tx_queues) {
- netdev_err(ndev, "Max %d traffic classes supported\n",
- priv->num_tx_rings);
- return -EINVAL;
- }
+ err = netdev_set_num_tc(ndev, num_tc);
+ if (err)
+ return err;
- /* For the moment, we use only one BD ring per TC.
- *
- * Configure num_tc BD rings with increasing priorities.
- */
- for (i = 0; i < num_tc; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = i;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ for (tc = 0; tc < num_tc; tc++) {
+ offset = mqprio->offset[tc];
+ count = mqprio->count[tc];
+ num_stack_tx_queues += count;
+
+ err = netdev_set_tc_queue(ndev, tc, count, offset);
+ if (err)
+ goto err_reset_tc;
+
+ for (q = offset; q < offset + count; q++) {
+ tx_ring = priv->tx_ring[q];
+ /* The prio_tc_map is skb_tx_hash()'s way of selecting
+ * between TX queues based on skb->priority. As such,
+ * there's nothing to offload based on it.
+ * Make the mqprio "traffic class" be the priority of
+ * this ring group, and leave the Tx IPV to traffic
+ * class mapping as its default mapping value of 1:1.
+ */
+ tx_ring->prio = tc;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
}
- /* Reset the number of netdev queues based on the TC count */
- netif_set_real_num_tx_queues(ndev, num_tc);
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_reset_tc;
- netdev_set_num_tc(ndev, num_tc);
+ priv->min_num_stack_tx_queues = num_stack_tx_queues;
- /* Each TC is associated with one netdev queue */
- for (i = 0; i < num_tc; i++)
- netdev_set_tc_queue(ndev, i, 1, i);
+ enetc_debug_tx_ring_prios(priv);
return 0;
+
+err_reset_tc:
+ enetc_reset_tc_mqprio(ndev);
+ return err;
}
EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);
static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
{
struct bpf_prog *old_prog, *prog = ctx;
- int i;
+ int num_stack_tx_queues;
+ int err, i;
old_prog = xchg(&priv->xdp_prog, prog);
+
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err) {
+ xchg(&priv->xdp_prog, old_prog);
+ return err;
+ }
+
if (old_prog)
bpf_prog_put(old_prog);
@@ -2698,9 +2736,20 @@ static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
+ int num_xdp_tx_queues = prog ? num_possible_cpus() : 0;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
bool extended;
+ if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
+ priv->num_tx_rings) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
+ num_xdp_tx_queues,
+ priv->min_num_stack_tx_queues,
+ priv->num_tx_rings);
+ return -EBUSY;
+ }
+
extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
/* The buffer layout is changing, so we need to drain the old
@@ -2898,6 +2947,7 @@ EXPORT_SYMBOL_GPL(enetc_ioctl);
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ int num_stack_tx_queues;
int first_xdp_tx_ring;
int i, n, err, nvec;
int v_tx_rings;
@@ -2974,6 +3024,17 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
}
}
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err)
+ goto fail;
+
+ err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
+ if (err)
+ goto fail;
+
+ priv->min_num_stack_tx_queues = num_possible_cpus();
first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
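The reworked enetc_setup_tc_mqprio() honors full queue groups: each traffic class contributes mqprio->count[tc] rings starting at mqprio->offset[tc], every ring in a group inherits hardware priority tc, and the stack TXQ count becomes the sum of the groups. A runnable model of that loop, under an assumed 8-ring, 3-TC layout:

#include <stdio.h>

#define NUM_RINGS 8

int main(void)
{
	int prio[NUM_RINGS] = { 0 };
	/* Example mqprio config: 3 TCs -> queue groups {0-3}, {4-5}, {6-7} */
	int offset[] = { 0, 4, 6 };
	int count[]  = { 4, 2, 2 };
	int num_tc = 3, num_stack_txqs = 0;

	for (int tc = 0; tc < num_tc; tc++) {
		num_stack_txqs += count[tc];		/* netdev_set_tc_queue() */
		for (int q = offset[tc]; q < offset[tc] + count[tc]; q++)
			prio[q] = tc;	/* ring priority = traffic class */
	}

	for (int q = 0; q < NUM_RINGS; q++)
		printf("TX ring %d prio %d\n", q, prio[q]);
	printf("real_num_tx_queues = %d\n", num_stack_txqs);
	return 0;
}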
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 1fe8dfd6b6d4..8010f31cd10d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -314,7 +314,6 @@ struct psfp_cap {
};
#define ENETC_F_TX_TSTAMP_MASK 0xff
-/* TODO: more hardware offloads */
enum enetc_active_offloads {
/* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
ENETC_F_TX_TSTAMP = BIT(0),
@@ -323,6 +322,7 @@ enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(8),
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
+ ENETC_F_QBU = BIT(11),
};
enum enetc_flags_bit {
@@ -369,6 +369,9 @@ struct enetc_ndev_priv {
struct psfp_cap psfp_cap;
+ /* Minimum number of TX queues required by the network stack */
+ unsigned int min_num_stack_tx_queues;
+
struct phylink *phylink;
int ic_mode;
u32 tx_ictt;
@@ -379,6 +382,11 @@ struct enetc_ndev_priv {
struct work_struct tx_onestep_tstamp;
struct sk_buff_head tx_skbs;
+
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates
+ */
+ struct mutex mm_lock;
};
/* Messaging */
@@ -424,6 +432,7 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);
+void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
/* control buffer descriptor ring (CBDR) */
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 05e2bad609c6..bca68edfbe9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
+#include <linux/ethtool_netlink.h>
#include <linux/net_tstamp.h>
#include <linux/module.h>
#include "enetc.h"
@@ -299,14 +300,32 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
}
+static void enetc_pause_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_pause_stats *pause_stats)
+{
+ pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac));
+ pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac));
+}
+
static void enetc_get_pause_stats(struct net_device *ndev,
struct ethtool_pause_stats *pause_stats)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(0));
- pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(0));
+ switch (pause_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_pause_stats(hw, 0, pause_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_pause_stats(hw, 1, pause_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_pause_stats(ndev, pause_stats);
+ break;
+ }
}
static void enetc_mac_stats(struct enetc_hw *hw, int mac,
@@ -383,8 +402,20 @@ static void enetc_get_eth_mac_stats(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- enetc_mac_stats(hw, 0, mac_stats);
+ switch (mac_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_mac_stats(hw, 0, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mac_stats(hw, 1, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_mac_stats(ndev, mac_stats);
+ break;
+ }
}
static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
@@ -392,8 +423,20 @@ static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- enetc_ctrl_stats(hw, 0, ctrl_stats);
+ switch (ctrl_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_ctrl_stats(hw, 0, ctrl_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_ctrl_stats(hw, 1, ctrl_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_ctrl_stats(ndev, ctrl_stats);
+ break;
+ }
}
static void enetc_get_rmon_stats(struct net_device *ndev,
@@ -402,8 +445,20 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+ switch (rmon_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_rmon_stats(hw, 1, rmon_stats, ranges);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_rmon_stats(ndev, rmon_stats);
+ break;
+ }
}
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
@@ -863,6 +918,166 @@ static int enetc_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
+static void enetc_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return;
+
+ s->MACMergeFrameAssErrorCount = enetc_port_rd(hw, ENETC_MMFAECR);
+ s->MACMergeFrameSmdErrorCount = enetc_port_rd(hw, ENETC_MMFSECR);
+ s->MACMergeFrameAssOkCount = enetc_port_rd(hw, ENETC_MMFAOCR);
+ s->MACMergeFragCountRx = enetc_port_rd(hw, ENETC_MMFCRXR);
+ s->MACMergeFragCountTx = enetc_port_rd(hw, ENETC_MMFCTXR);
+ s->MACMergeHoldCount = enetc_port_rd(hw, ENETC_MMHCR);
+}
+
+static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ u32 lafs, rafs, val;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_PFPMR);
+ state->pmac_enabled = !!(val & ENETC_PFPMR_PMACE);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ switch (ENETC_MMCSR_GET_VSTS(val)) {
+ case 0:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ break;
+ case 2:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ break;
+ case 3:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+ break;
+ case 4:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
+ break;
+ case 5:
+ default:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
+ break;
+ }
+
+ rafs = ENETC_MMCSR_GET_RAFS(val);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(rafs);
+ lafs = ENETC_MMCSR_GET_LAFS(val);
+ state->rx_min_frag_size = ethtool_mm_frag_size_add_to_min(lafs);
+ state->tx_enabled = !!(val & ENETC_MMCSR_LPE); /* mirror of MMCSR_ME */
+ state->tx_active = !!(val & ENETC_MMCSR_LPA);
+ state->verify_enabled = !(val & ENETC_MMCSR_VDIS);
+ state->verify_time = ENETC_MMCSR_GET_VT(val);
+ /* A verifyTime of 128 ms would exceed the 7 bit width
+ * of the ENETC_MMCSR_VT field
+ */
+ state->max_verify_time = 127;
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+ u32 val, add_frag_size;
+ int err;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return -EOPNOTSUPP;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &add_frag_size, extack);
+ if (err)
+ return err;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_PFPMR);
+ if (cfg->pmac_enabled)
+ val |= ENETC_PFPMR_PMACE;
+ else
+ val &= ~ENETC_PFPMR_PMACE;
+ enetc_port_wr(hw, ENETC_PFPMR, val);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ if (cfg->verify_enabled)
+ val &= ~ENETC_MMCSR_VDIS;
+ else
+ val |= ENETC_MMCSR_VDIS;
+
+ if (cfg->tx_enabled)
+ priv->active_offloads |= ENETC_F_QBU;
+ else
+ priv->active_offloads &= ~ENETC_F_QBU;
+
+ /* If link is up, enable MAC Merge right away */
+ if (!!(priv->active_offloads & ENETC_F_QBU) &&
+ !(val & ENETC_MMCSR_LINK_FAIL))
+ val |= ENETC_MMCSR_ME;
+
+ val &= ~ENETC_MMCSR_VT_MASK;
+ val |= ENETC_MMCSR_VT(cfg->verify_time);
+
+ val &= ~ENETC_MMCSR_RAFS_MASK;
+ val |= ENETC_MMCSR_RAFS(add_frag_size);
+
+ enetc_port_wr(hw, ENETC_MMCSR, val);
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+/* When the link is lost, the verification state machine goes to the FAILED
+ * state and doesn't restart on its own after a new link up event.
+ * According to 802.3 Figure 99-8 - Verify state diagram, the LINK_FAIL bit
+ * should have been sufficient to re-trigger verification, but for ENETC it
+ * doesn't. As a workaround, we need to toggle the Merge Enable bit to
+ * re-trigger verification when link comes up.
+ */
+void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 val;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ if (link) {
+ val &= ~ENETC_MMCSR_LINK_FAIL;
+ if (priv->active_offloads & ENETC_F_QBU)
+ val |= ENETC_MMCSR_ME;
+ } else {
+ val |= ENETC_MMCSR_LINK_FAIL;
+ if (priv->active_offloads & ENETC_F_QBU)
+ val &= ~ENETC_MMCSR_ME;
+ }
+
+ enetc_port_wr(hw, ENETC_MMCSR, val);
+
+ mutex_unlock(&priv->mm_lock);
+}
+EXPORT_SYMBOL_GPL(enetc_mm_link_state_update);
+
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -893,6 +1108,9 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.set_wol = enetc_set_wol,
.get_pauseparam = enetc_get_pauseparam,
.set_pauseparam = enetc_set_pauseparam,
+ .get_mm = enetc_get_mm,
+ .set_mm = enetc_set_mm,
+ .get_mm_stats = enetc_get_mm_stats,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
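The same three-way dispatch repeats across the pause, MAC, ctrl and RMON callbacks above: EMAC reads register bank 0, PMAC reads bank 1 only when the port supports frame preemption (Qbu), and AGGREGATE defers to the ethtool_aggregate_*_stats() helpers. A compressed userspace model of that shape (the driver's aggregate path goes through the ethtool helpers rather than summing directly; enum names here are illustrative):

#include <stdio.h>

enum stats_src { SRC_EMAC, SRC_PMAC, SRC_AGGREGATE };

/* Hypothetical reader: bank 0 = express MAC, bank 1 = preemptible MAC. */
static unsigned long read_bank(int bank) { return bank ? 7 : 42; }

static unsigned long get_stats(enum stats_src src, int has_qbu)
{
	switch (src) {
	case SRC_EMAC:
		return read_bank(0);
	case SRC_PMAC:
		return has_qbu ? read_bank(1) : 0;	/* no pMAC: no data */
	case SRC_AGGREGATE:
		return read_bank(0) + (has_qbu ? read_bank(1) : 0);
	}
	return 0;
}

int main(void)
{
	printf("%lu %lu %lu\n", get_stats(SRC_EMAC, 1),
	       get_stats(SRC_PMAC, 1), get_stats(SRC_AGGREGATE, 1));
	return 0;
}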
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 041df7d0ae81..de2e0ee8cdcb 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -222,7 +222,30 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */
#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */
#define ENETC_MMCSR 0x1f00
-#define ENETC_MMCSR_ME BIT(16)
+#define ENETC_MMCSR_LINK_FAIL BIT(31)
+#define ENETC_MMCSR_VT_MASK GENMASK(29, 23) /* Verify Time */
+#define ENETC_MMCSR_VT(x) (((x) << 23) & ENETC_MMCSR_VT_MASK)
+#define ENETC_MMCSR_GET_VT(x) (((x) & ENETC_MMCSR_VT_MASK) >> 23)
+#define ENETC_MMCSR_TXSTS_MASK GENMASK(22, 21) /* Merge Status */
+#define ENETC_MMCSR_GET_TXSTS(x) (((x) & ENETC_MMCSR_TXSTS_MASK) >> 21)
+#define ENETC_MMCSR_VSTS_MASK GENMASK(20, 18) /* Verify Status */
+#define ENETC_MMCSR_GET_VSTS(x) (((x) & ENETC_MMCSR_VSTS_MASK) >> 18)
+#define ENETC_MMCSR_VDIS BIT(17) /* Verify Disabled */
+#define ENETC_MMCSR_ME BIT(16) /* Merge Enabled */
+#define ENETC_MMCSR_RAFS_MASK GENMASK(9, 8) /* Remote Additional Fragment Size */
+#define ENETC_MMCSR_RAFS(x) (((x) << 8) & ENETC_MMCSR_RAFS_MASK)
+#define ENETC_MMCSR_GET_RAFS(x) (((x) & ENETC_MMCSR_RAFS_MASK) >> 8)
+#define ENETC_MMCSR_LAFS_MASK GENMASK(4, 3) /* Local Additional Fragment Size */
+#define ENETC_MMCSR_GET_LAFS(x) (((x) & ENETC_MMCSR_LAFS_MASK) >> 3)
+#define ENETC_MMCSR_LPA BIT(2) /* Local Preemption Active */
+#define ENETC_MMCSR_LPE BIT(1) /* Local Preemption Enabled */
+#define ENETC_MMCSR_LPS BIT(0) /* Local Preemption Supported */
+#define ENETC_MMFAECR 0x1f08
+#define ENETC_MMFSECR 0x1f0c
+#define ENETC_MMFAOCR 0x1f10
+#define ENETC_MMFCRXR 0x1f14
+#define ENETC_MMFCTXR 0x1f18
+#define ENETC_MMHCR 0x1f1c
#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */
#define ENETC_PMAC_OFFSET 0x1000
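The new ENETC_MMCSR field accessors are plain mask/shift pairs. A quick standalone round-trip (with GENMASK() redefined locally for userspace) confirms the encode and decode macros agree, e.g. for the 7-bit Verify Time field:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define MMCSR_VT_MASK		GENMASK(29, 23)		/* Verify Time */
#define MMCSR_VT(x)		(((x) << 23) & MMCSR_VT_MASK)
#define MMCSR_GET_VT(x)		(((x) & MMCSR_VT_MASK) >> 23)
#define MMCSR_RAFS_MASK		GENMASK(9, 8)	/* Remote Add. Frag Size */
#define MMCSR_RAFS(x)		(((x) << 8) & MMCSR_RAFS_MASK)
#define MMCSR_GET_RAFS(x)	(((x) & MMCSR_RAFS_MASK) >> 8)

int main(void)
{
	uint32_t val = 0;

	val |= MMCSR_VT(127);	/* max verifyTime a 7-bit field can hold */
	val |= MMCSR_RAFS(2);
	printf("VT=%u RAFS=%u\n", MMCSR_GET_VT(val), MMCSR_GET_RAFS(val));
	return 0;	/* prints VT=127 RAFS=2 */
}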
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 6b54071d4ecc..7cd22d370caa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1086,6 +1086,9 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, cmd_cfg);
enetc_mac_enable(si, true);
+
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mm_link_state_update(priv, true);
}
static void enetc_pl_mac_link_down(struct phylink_config *config,
@@ -1093,8 +1096,15 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
phy_interface_t interface)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_si *si = pf->si;
+ struct enetc_ndev_priv *priv;
- enetc_mac_enable(pf->si, false);
+ priv = netdev_priv(si->ndev);
+
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mm_link_state_update(priv, false);
+
+ enetc_mac_enable(si, false);
}
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
@@ -1287,6 +1297,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
priv = netdev_priv(ndev);
+ mutex_init(&priv->mm_lock);
+
enetc_init_si_rings_params(priv);
err = enetc_alloc_si_resources(priv);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index fcebb54224c0..130ebf6853e6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -136,29 +136,21 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- struct enetc_bdr *tx_ring;
- int err;
- int i;
+ int err, i;
/* TSD and Qbv are mutually exclusive in hardware */
for (i = 0; i < priv->num_tx_rings; i++)
if (priv->tx_ring[i]->tsd_enable)
return -EBUSY;
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = taprio->enable ? i : 0;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
- }
+ err = enetc_setup_tc_mqprio(ndev, &taprio->mqprio);
+ if (err)
+ return err;
err = enetc_setup_taprio(ndev, taprio);
if (err) {
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = taprio->enable ? 0 : i;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
- }
+ taprio->mqprio.qopt.num_tc = 0;
+ enetc_setup_tc_mqprio(ndev, &taprio->mqprio);
}
return err;
@@ -1611,6 +1603,13 @@ int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
struct enetc_si *si = priv->si;
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 5ff45b1a74a5..c73e25f8995e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -56,12 +56,12 @@
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -4035,10 +4035,10 @@ free_queue_mem:
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
- int err, phy_reset;
- bool active_high = false;
+ struct gpio_desc *phy_reset;
int msec = 1, phy_post_delay = 0;
struct device_node *np = pdev->dev.of_node;
+ int err;
if (!np)
return 0;
@@ -4048,33 +4048,26 @@ static int fec_reset_phy(struct platform_device *pdev)
if (!err && msec > 1000)
msec = 1;
- phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- if (phy_reset == -EPROBE_DEFER)
- return phy_reset;
- else if (!gpio_is_valid(phy_reset))
- return 0;
-
err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
/* valid reset duration should be less than 1s */
if (!err && phy_post_delay > 1000)
return -EINVAL;
- active_high = of_property_read_bool(np, "phy-reset-active-high");
+ phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(phy_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
+ "failed to get phy-reset-gpios\n");
- err = devm_gpio_request_one(&pdev->dev, phy_reset,
- active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- "phy-reset");
- if (err) {
- dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
- return err;
- }
+ if (!phy_reset)
+ return 0;
if (msec > 20)
msleep(msec);
else
usleep_range(msec * 1000, msec * 1000 + 1000);
- gpio_set_value_cansleep(phy_reset, !active_high);
+ gpiod_set_value_cansleep(phy_reset, 0);
if (!phy_post_delay)
return 0;
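The fec_reset_phy() conversion can drop the driver's own active_high bookkeeping because gpiod values are logical: the active-low inversion, formerly read from "phy-reset-active-high", now comes from the GPIO flags in the device tree, so the driver asserts at request time (GPIOD_OUT_HIGH) and writes 0 to release. A toy model of that inversion boundary (not driver code):

#include <stdio.h>

/* Model: gpiod values are *logical*; active-low inversion happens below
 * the API (from DT flags), not in the consumer driver.
 */
static int line_active_low = 1;	/* example: reset line wired active-low */

static void gpiod_model_set(int logical, int *physical)
{
	*physical = line_active_low ? !logical : logical;
}

int main(void)
{
	int phys;

	gpiod_model_set(1, &phys);	/* GPIOD_OUT_HIGH: assert reset */
	printf("assert: physical=%d\n", phys);
	gpiod_model_set(0, &phys);	/* release reset after the delay */
	printf("deassert: physical=%d\n", phys);
	return 0;
}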
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 9349f841bd06..587ad81a2dc3 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1055,6 +1055,9 @@ static struct phylink_pcs *memac_pcs_create(struct device_node *mac_node,
return ERR_PTR(-EPROBE_DEFER);
pcs = lynx_pcs_create(mdiodev);
+ if (!pcs)
+ mdio_device_free(mdiodev);
+
return pcs;
}
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 5b40f9c53196..07111c241e0e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -327,7 +327,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
int num_vecs_requested = priv->num_ntfy_blks + 1;
- char *name = priv->dev->name;
unsigned int active_cpus;
int vecs_enabled;
int i, j;
@@ -371,8 +370,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
/* Setup Management Vector - the last vector */
- snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
- name);
+ snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
+ pci_name(priv->pdev));
err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
if (err) {
@@ -401,8 +400,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
struct gve_notify_block *block = &priv->ntfy_blocks[i];
int msix_idx = i;
- snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
- name, i);
+ snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s",
+ i, pci_name(priv->pdev));
block->priv = priv;
err = request_irq(priv->msix_vectors[msix_idx].vector,
gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 17137de9338c..40f4306449eb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -32,6 +32,7 @@
#include <linux/pkt_sched.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#define HNAE3_MOD_VERSION "1.0"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b4c4fb873568..25be7f8ac7cd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -20,6 +20,7 @@
#include <net/gro.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index 3d3b69605423..9a939c0b217f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -114,7 +114,6 @@ int hclge_devlink_init(struct hclge_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
index a6c3c5e8f0ab..1b535142c65a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -116,7 +116,6 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e19a6bb3f444..146ca1d8031b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -250,10 +250,11 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
struct ibmvnic_sub_crq_queue *queue;
- int num_rxqs = adapter->num_active_rx_scrqs;
- int num_txqs = adapter->num_active_tx_scrqs;
+ int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
+ int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
unsigned int num_cpu, cpu;
+ bool is_rx_queue;
int rc = 0;
netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
@@ -273,14 +274,24 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
/* next available cpu to assign irq to */
cpu = cpumask_next(-1, cpu_online_mask);
- for (i = 0; i < num_txqs; i++) {
- queue = txqs[i];
+ for (i = 0; i < total_queues; i++) {
+ is_rx_queue = false;
+ /* balance core load by alternating rx and tx assignments
+ * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
+ */
+ if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
+ queue = rxqs[i_rxqs++];
+ is_rx_queue = true;
+ } else {
+ queue = txqs[i_txqs++];
+ }
+
rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
stride);
if (rc)
goto out;
- if (!queue)
+ if (!queue || is_rx_queue)
continue;
rc = __netif_set_xps_queue(adapter->netdev,
@@ -291,14 +302,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
__func__, i, rc);
}
- for (i = 0; i < num_rxqs; i++) {
- queue = rxqs[i];
- rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
- stride);
- if (rc)
- goto out;
- }
-
out:
if (rc) {
netdev_warn(adapter->netdev,
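The rewritten ibmvnic affinity loop above replaces two sequential passes with one combined walk that alternates TX and RX assignments, and drains whichever type remains once the other is exhausted. A runnable model of the selection rule with example counts:

#include <stdio.h>

int main(void)
{
	int num_txqs = 4, num_rxqs = 2;	/* example queue counts */
	int i_txqs = 0, i_rxqs = 0;
	int total = num_txqs + num_rxqs;

	for (int i = 0; i < total; i++) {
		/* Odd slots take an RX queue while any remain; once one
		 * type is exhausted, the other fills the rest.
		 */
		if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs)
			printf("RX%d ", i_rxqs++);
		else
			printf("TX%d ", i_txqs++);
	}
	printf("\n");	/* TX0 RX0 TX1 RX1 TX2 TX3 */
	return 0;
}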
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 04acd1a992fa..e1eb1de88bf9 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7418,9 +7418,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_pci_reg;
- /* AER (Advanced Error Reporting) hooks */
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
/* PCI config space info */
err = pci_save_state(pdev);
@@ -7708,7 +7705,6 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -7775,9 +7771,6 @@ static void e1000_remove(struct pci_dev *pdev)
free_netdev(netdev);
- /* AER disable */
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index b473cb7d7c57..027d721feb18 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2127,8 +2127,6 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -2227,7 +2225,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -2281,8 +2278,6 @@ static void fm10k_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 60e351665c70..38c341b9f368 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -33,6 +33,7 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/udp_tunnel.h>
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 8a79cc18c428..3ee00c3bc319 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -15595,7 +15595,6 @@ err_switch_setup:
timer_shutdown_sync(&pf->service_timer);
i40e_shutdown_adminq(hw);
iounmap(hw->hw_addr);
- pci_disable_pcie_error_reporting(pf->pdev);
pci_release_mem_regions(pf->pdev);
pci_disable_device(pf->pdev);
kfree(pf);
@@ -15666,7 +15665,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
/* Now that we have a PCI connection, we need to do the
@@ -16224,7 +16222,6 @@ err_pf_reset:
err_ioremap:
kfree(pf);
err_pf_alloc:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -16372,7 +16369,6 @@ unmap:
kfree(pf);
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 23bc000e77b8..232bc61d9eee 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -30,6 +30,7 @@
#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 2c4480b20db3..3273aeb8fa67 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -4868,8 +4868,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
@@ -4957,7 +4955,6 @@ err_ioremap:
err_alloc_wq:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:
@@ -5175,8 +5172,6 @@ static void iavf_remove(struct pci_dev *pdev)
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index ae93ae488bc2..7bd50e49312c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -41,6 +41,7 @@
#include <linux/dim.h>
#include <linux/gnss.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
@@ -353,7 +354,6 @@ struct ice_vsi {
struct ice_vf *vf; /* VF associated with this VSI */
- u16 ethtype; /* Ethernet protocol for pause frame */
u16 num_gfltr;
u16 num_bfltr;
@@ -880,7 +880,7 @@ void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
@@ -889,7 +889,7 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
-int ice_vsi_cfg(struct ice_vsi *vsi);
+int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
@@ -907,6 +907,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
+void ice_deinit_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
@@ -931,6 +932,8 @@ int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
+int ice_load(struct ice_pf *pf);
+void ice_unload(struct ice_pf *pf);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index f5842ff42bc7..1b79bb0a4fcd 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1088,8 +1088,10 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*hw->port_info), GFP_KERNEL);
+ if (!hw->port_info)
+ hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*hw->port_info),
+ GFP_KERNEL);
if (!hw->port_info) {
status = -ENOMEM;
goto err_unroll_cqinit;
@@ -1217,11 +1219,6 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
- if (hw->port_info) {
- devm_kfree(ice_hw_to_dev(hw), hw->port_info);
- hw->port_info = NULL;
- }
-
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
ice_destroy_all_ctrlq(hw);
@@ -5537,7 +5534,7 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
* returned by the firmware is a 16-bit value, but is indexed
* by [fls(speed) - 1]
*/
-static const u32 ice_aq_to_link_speed[15] = {
+static const u32 ice_aq_to_link_speed[] = {
SPEED_10, /* BIT(0) */
SPEED_100,
SPEED_1000,
@@ -5549,10 +5546,6 @@ static const u32 ice_aq_to_link_speed[15] = {
SPEED_40000,
SPEED_50000,
SPEED_100000, /* BIT(10) */
- 0,
- 0,
- 0,
- 0 /* BIT(14) */
};
/**
@@ -5563,5 +5556,8 @@ static const u32 ice_aq_to_link_speed[15] = {
*/
u32 ice_get_link_speed(u16 index)
{
+ if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
+ return 0;
+
return ice_aq_to_link_speed[index];
}
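
The new bounds check matters because the index convention is fls(speed) - 1
on a firmware bitmask: an unknown high bit, or speed == 0 (fls() returns 0,
so the u16 index wraps to 65535), would otherwise read past the now
compiler-sized array. A hedged caller sketch, where "link_speed" is an
illustrative AQ value and ICE_AQ_LINK_SPEED_25GB is one of the existing AQ
speed bits:

	u16 link_speed = ICE_AQ_LINK_SPEED_25GB;	/* example AQ speed bit */
	u32 mbps = ice_get_link_speed(fls(link_speed) - 1);	/* Mbps from the table above */

	/* out-of-range indices (including fls(0) - 1) now return 0 */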
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a97b137e21c0..c6d4926f0fcf 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -441,7 +441,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto out;
}
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, false);
out:
/* enable previously downed VSIs */
@@ -731,12 +731,13 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
/**
* ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
* @pf: pointer to the PF struct
+ * @locked: whether the adev device_lock is held
*
* The caller is assumed to have already disabled all VSIs before
* calling this function, which reconfigures DCB based on
* local_dcbx_cfg.
*/
-void ice_pf_dcb_recfg(struct ice_pf *pf)
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
struct iidc_event *event;
@@ -783,14 +784,16 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- /* Notify the AUX drivers that TC change is finished */
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return;
-
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- kfree(event);
+ if (!locked) {
+ /* Notify the AUX drivers that TC change is finished */
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return;
+
+ set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
}
/**
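
A hedged caller sketch for the new @locked parameter, using
ice_dcb_ena_dis_vsi() exactly as the surrounding flows do: paths that
already hold the auxiliary device lock pass true so the AFTER_TC_CHANGE
event is not sent to an aux driver that is fenced out anyway.

	ice_dcb_ena_dis_vsi(pf, false, true);	/* down VSIs first, per the doc above */
	ice_pf_dcb_recfg(pf, false);		/* unlocked path: aux drivers get notified */
	ice_dcb_ena_dis_vsi(pf, true, true);	/* bring VSIs back up */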
@@ -1078,7 +1081,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
/* changes in configuration update VSI */
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, false);
/* enable previously downed VSIs */
ice_dcb_ena_dis_vsi(pf, true, true);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 4c421c842a13..800879a88c5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -23,7 +23,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
-void ice_pf_dcb_recfg(struct ice_pf *pf);
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
@@ -128,7 +128,7 @@ static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
return 0;
}
-static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
+static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }
static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index ce753d23aba9..63acb7b96053 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -371,10 +371,7 @@ out_free_ctx:
/**
* ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
- * @devlink: pointer to the devlink instance to reload
- * @netns_change: if true, the network namespace is changing
- * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
- * @limit: limits on what reload should do, such as not resetting
+ * @pf: pointer to the pf instance
* @extack: netlink extended ACK structure
*
* Allow user to activate new Embedded Management Processor firmware by
@@ -387,12 +384,9 @@ out_free_ctx:
* any source.
*/
static int
-ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
- enum devlink_reload_action action,
- enum devlink_reload_limit limit,
+ice_devlink_reload_empr_start(struct ice_pf *pf,
struct netlink_ext_ack *extack)
{
- struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
u8 pending;
@@ -431,11 +425,51 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
}
/**
+ * ice_devlink_reload_down - prepare for reload
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform
+ * @limit: limits on what reload should do, such as not resetting
+ * @extack: netlink extended ACK structure
+ */
+static int
+ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Go to legacy mode before doing reinit\n");
+ return -EOPNOTSUPP;
+ }
+ if (ice_is_adq_active(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Turn off ADQ before doing reinit\n");
+ return -EOPNOTSUPP;
+ }
+ if (ice_has_vfs(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Remove all VFs before doing reinit\n");
+ return -EOPNOTSUPP;
+ }
+ ice_unload(pf);
+ return 0;
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ return ice_devlink_reload_empr_start(pf, extack);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* ice_devlink_reload_empr_finish - Wait for EMP reset to finish
- * @devlink: pointer to the devlink instance reloading
- * @action: the action requested
- * @limit: limits imposed by userspace, such as not resetting
- * @actions_performed: on return, indicate what actions actually performed
+ * @pf: pointer to the pf instance
* @extack: netlink extended ACK structure
*
* Wait for driver to finish rebuilding after EMP reset is completed. This
@@ -443,17 +477,11 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
* for the driver's rebuild to complete.
*/
static int
-ice_devlink_reload_empr_finish(struct devlink *devlink,
- enum devlink_reload_action action,
- enum devlink_reload_limit limit,
- u32 *actions_performed,
+ice_devlink_reload_empr_finish(struct ice_pf *pf,
struct netlink_ext_ack *extack)
{
- struct ice_pf *pf = devlink_priv(devlink);
int err;
- *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
-
err = ice_wait_for_reset(pf, 60 * HZ);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
@@ -1192,12 +1220,43 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
return status;
}
+/**
+ * ice_devlink_reload_up - do reload up after reinit
+ * @devlink: pointer to the devlink instance reloading
+ * @action: the action requested
+ * @limit: limits imposed by userspace, such as not resetting
+ * @actions_performed: on return, indicate what actions actually performed
+ * @extack: netlink extended ACK structure
+ */
+static int
+ice_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ return ice_load(pf);
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+ return ice_devlink_reload_empr_finish(pf, extack);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
- .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
- /* The ice driver currently does not support driver reinit */
- .reload_down = ice_devlink_reload_empr_start,
- .reload_up = ice_devlink_reload_empr_finish,
+ .reload_down = ice_devlink_reload_down,
+ .reload_up = ice_devlink_reload_up,
.port_split = ice_devlink_port_split,
.port_unsplit = ice_devlink_port_unsplit,
.eswitch_mode_get = ice_eswitch_mode_get,
@@ -1376,7 +1435,6 @@ void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
}
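
Taken together, these hunks split reload handling by action. A hedged
sketch of the driver_reinit round trip the new callbacks implement, built
only from ice_unload()/ice_load() as declared in ice.h by this series;
userspace drives it with "devlink dev reload <pci addr> action
driver_reinit". The devlink_set_features() call dropped from
ice_devlink_register() tracks the devlink core now inferring reload
support from the registered ops.

	/* .reload_down, DEVLINK_RELOAD_ACTION_DRIVER_REINIT */
	ice_unload(pf);			/* detach netdev, VSIs, aux devices */

	/* .reload_up */
	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
	err = ice_load(pf);		/* re-create the driver state */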
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index f9f15acae90a..f6dd3f8fd936 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -71,17 +71,17 @@ void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
if (!ice_is_switchdev_running(vf->pf))
return;
- if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+ if (is_valid_ether_addr(vf->hw_lan_addr)) {
err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
- vf->hw_lan_addr.addr);
+ vf->hw_lan_addr);
if (err) {
dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d, error %d\n",
- vf->hw_lan_addr.addr, vf->vf_id, err);
+ vf->hw_lan_addr, vf->vf_id, err);
return;
}
vf->num_mac++;
- ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
}
}
@@ -237,7 +237,7 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
netif_napi_del(&vf->repr->q_vector->napi);
@@ -265,14 +265,14 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
GFP_KERNEL);
if (!vf->repr->dst) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
goto err;
}
if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
@@ -281,7 +281,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(vsi)) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
@@ -338,7 +338,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
vsi->vf->vf_id);
}
@@ -425,7 +425,13 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_SWITCHDEV_CTRL;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
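
These .addr drops track a companion type change elsewhere in the series:
the VF MAC fields are assumed to become bare byte arrays rather than a
wrapper struct dereferenced via an .addr member, so they can be handed
straight to the ether_addr helpers. An illustrative sketch (the container
name is hypothetical):

	struct ice_vf_macs_example {
		u8 dev_lan_addr[ETH_ALEN];	/* previously a struct with an .addr member */
		u8 hw_lan_addr[ETH_ALEN];
	};

	ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);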
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1adbcb4786c2..b360bd8f1599 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -656,7 +656,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_setup_rx_ring;
- status = ice_vsi_cfg(vsi);
+ status = ice_vsi_cfg_lan(vsi);
if (status)
goto err_setup_rx_ring;
@@ -3658,7 +3658,9 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int new_rx = 0, new_tx = 0;
+ bool locked = false;
u32 curr_combined;
+ int ret = 0;
/* do not support changing channels in Safe Mode */
if (ice_is_safe_mode(pf)) {
@@ -3722,15 +3724,33 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- ice_vsi_recfg_qs(vsi, new_rx, new_tx);
+ if (pf->adev) {
+ mutex_lock(&pf->adev_mutex);
+ device_lock(&pf->adev->dev);
+ locked = true;
+ if (pf->adev->dev.driver) {
+ netdev_err(dev, "Cannot change channels when RDMA is active\n");
+ ret = -EBUSY;
+ goto adev_unlock;
+ }
+ }
+
+ ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
- if (!netif_is_rxfh_configured(dev))
- return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ if (!netif_is_rxfh_configured(dev)) {
+ ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ goto adev_unlock;
+ }
/* Update rss_size due to change in Rx queues */
vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
- return 0;
+adev_unlock:
+ if (locked) {
+ device_unlock(&pf->adev->dev);
+ mutex_unlock(&pf->adev_mutex);
+ }
+ return ret;
}
/**
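
The locking added to ice_set_channels() uses a standard driver-core fence:
holding the auxiliary device's device_lock excludes the aux (RDMA) driver's
probe/remove, and dev.driver is non-NULL exactly while a driver is bound.
A minimal sketch of the idiom, with "busy" as an illustrative local:

	bool busy = false;

	mutex_lock(&pf->adev_mutex);		/* stabilizes pf->adev itself */
	device_lock(&pf->adev->dev);		/* fences aux probe/remove */
	if (pf->adev->dev.driver)
		busy = true;			/* RDMA driver currently bound */
	device_unlock(&pf->adev->dev);
	mutex_unlock(&pf->adev_mutex);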
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 40e678cfb507..aff7a141c30d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -208,6 +208,11 @@ static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
void ice_fltr_remove_all(struct ice_vsi *vsi)
{
ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+ /* sync netdev filters if they exist */
+ if (vsi->netdev) {
+ __dev_uc_unsync(vsi->netdev, NULL);
+ __dev_mc_unsync(vsi->netdev, NULL);
+ }
}
/**
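
Passing a NULL callback to the unsync helpers only resets the netdev's
address-sync bookkeeping, which is sufficient here because
ice_remove_vsi_fltr() has already flushed every HW filter for the VSI. For
contrast, a hedged sketch of the callback form a driver would use when
each address needs individual HW teardown (the function name is
illustrative):

	static int example_unsync_addr(struct net_device *netdev,
				       const unsigned char *addr)
	{
		/* per-address HW filter removal would go here */
		return 0;
	}

	__dev_uc_unsync(netdev, example_unsync_addr);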
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 895c32bcc8b5..e6bc2285071e 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -6,6 +6,8 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+static DEFINE_XARRAY_ALLOC1(ice_aux_id);
+
/**
* ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
* @pf: pointer to PF struct
@@ -246,6 +248,17 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf)
}
/**
+ * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
+ * @pf: board private structure to initialize
+ */
+static void ice_free_rdma_qvector(struct ice_pf *pf)
+{
+ pf->num_avail_sw_msix -= pf->num_rdma_msix;
+ ice_free_res(pf->irq_tracker, pf->rdma_base_vector,
+ ICE_RES_RDMA_VEC_ID);
+}
+
+/**
* ice_adev_release - function to be mapped to AUX dev's release op
* @dev: pointer to device to free
*/
@@ -331,12 +344,48 @@ int ice_init_rdma(struct ice_pf *pf)
struct device *dev = &pf->pdev->dev;
int ret;
+ if (!ice_is_rdma_ena(pf)) {
+ dev_warn(dev, "RDMA is not supported on this device\n");
+ return 0;
+ }
+
+ ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
+ GFP_KERNEL);
+ if (ret) {
+ dev_err(dev, "Failed to allocate device ID for AUX driver\n");
+ return -ENOMEM;
+ }
+
/* Reserve vector resources */
ret = ice_reserve_rdma_qvector(pf);
if (ret < 0) {
dev_err(dev, "failed to reserve vectors for RDMA\n");
- return ret;
+ goto err_reserve_rdma_qvector;
}
pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
- return ice_plug_aux_dev(pf);
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ goto err_plug_aux_dev;
+ return 0;
+
+err_plug_aux_dev:
+ ice_free_rdma_qvector(pf);
+err_reserve_rdma_qvector:
+ pf->adev = NULL;
+ xa_erase(&ice_aux_id, pf->aux_idx);
+ return ret;
+}
+
+/**
+ * ice_deinit_rdma - deinitialize RDMA on PF
+ * @pf: ptr to ice_pf
+ */
+void ice_deinit_rdma(struct ice_pf *pf)
+{
+ if (!ice_is_rdma_ena(pf))
+ return;
+
+ ice_unplug_aux_dev(pf);
+ ice_free_rdma_qvector(pf);
+ xa_erase(&ice_aux_id, pf->aux_idx);
}
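
The xarray replaces the global IDA that ice_main.c drops later in this
patch, giving each PF a unique auxiliary device ID starting from 1. A
self-contained sketch of the lifecycle used above (only the names outside
the xa_* API are illustrative):

	static DEFINE_XARRAY_ALLOC1(example_ids);	/* IDs allocated from 1 */

	static int example_get_id(u32 *id)
	{
		/* the entry is NULL; only the unique index is needed */
		return xa_alloc(&example_ids, id, NULL,
				XA_LIMIT(1, INT_MAX), GFP_KERNEL);
	}

	static void example_put_id(u32 id)
	{
		xa_erase(&example_ids, id);
	}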
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 4a35479bd7dd..37fe639712e6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -166,14 +166,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
- * @vf: the VF associated with this VSI, if any
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
enum ice_vsi_type vsi_type = vsi->type;
struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = vsi->vf;
if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
return;
@@ -282,10 +282,10 @@ static int ice_get_free_slot(void *array, int size, int curr)
}
/**
- * ice_vsi_delete - delete a VSI from the switch
+ * ice_vsi_delete_from_hw - delete a VSI from the switch
* @vsi: pointer to VSI being removed
*/
-void ice_vsi_delete(struct ice_vsi *vsi)
+static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
@@ -348,47 +348,144 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
}
/**
- * ice_vsi_clear - clean up and deallocate the provided VSI
+ * ice_vsi_free_stats - Free the ring statistics structures
+ * @vsi: VSI pointer
+ */
+static void ice_vsi_free_stats(struct ice_vsi *vsi)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ ice_for_each_alloc_txq(vsi, i) {
+ if (vsi_stat->tx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+ }
+ }
+
+ ice_for_each_alloc_rxq(vsi, i) {
+ if (vsi_stat->rx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+ }
+ }
+
+ kfree(vsi_stat->tx_ring_stats);
+ kfree(vsi_stat->rx_ring_stats);
+ kfree(vsi_stat);
+ pf->vsi_stats[vsi->idx] = NULL;
+}
+
+/**
+ * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
+ * @vsi: VSI which is having stats allocated
+ */
+static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
+{
+ struct ice_ring_stats **tx_ring_stats;
+ struct ice_ring_stats **rx_ring_stats;
+ struct ice_vsi_stats *vsi_stats;
+ struct ice_pf *pf = vsi->back;
+ u16 i;
+
+ vsi_stats = pf->vsi_stats[vsi->idx];
+ tx_ring_stats = vsi_stats->tx_ring_stats;
+ rx_ring_stats = vsi_stats->rx_ring_stats;
+
+ /* Allocate Tx ring stats */
+ ice_for_each_alloc_txq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_tx_ring *ring;
+
+ ring = vsi->tx_rings[i];
+ ring_stats = tx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(tx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ /* Allocate Rx ring stats */
+ ice_for_each_alloc_rxq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_rx_ring *ring;
+
+ ring = vsi->rx_rings[i];
+ ring_stats = rx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(rx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ return 0;
+
+err_out:
+ ice_vsi_free_stats(vsi);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_free - clean up and deallocate the provided VSI
* @vsi: pointer to VSI being cleared
*
* This deallocates the VSI's queue resources, removes it from the PF's
* VSI array if necessary, and deallocates the VSI
- *
- * Returns 0 on success, negative on failure
*/
-int ice_vsi_clear(struct ice_vsi *vsi)
+static void ice_vsi_free(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
- if (!vsi)
- return 0;
-
- if (!vsi->back)
- return -EINVAL;
+ if (!vsi || !vsi->back)
+ return;
pf = vsi->back;
dev = ice_pf_to_dev(pf);
if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
- return -EINVAL;
+ return;
}
mutex_lock(&pf->sw_mutex);
/* updates the PF for this cleared VSI */
pf->vsi[vsi->idx] = NULL;
- if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
- pf->next_vsi = vsi->idx;
- if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf)
- pf->next_vsi = vsi->idx;
+ pf->next_vsi = vsi->idx;
+ ice_vsi_free_stats(vsi);
ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
devm_kfree(dev, vsi);
+}
- return 0;
+void ice_vsi_delete(struct ice_vsi *vsi)
+{
+ ice_vsi_delete_from_hw(vsi);
+ ice_vsi_free(vsi);
}
/**
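
The moved stats helpers follow the usual RCU publish/retire pattern:
kfree_rcu() defers the free past a grace period while WRITE_ONCE() clears
or publishes the per-queue pointer that stats readers sample. A hedged
distillation, with the reader side assumed from the driver's stats
structures:

	/* writer: retire the old buffer, then clear the published pointer */
	kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
	WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);

	/* reader sketch: sample the pointer once and tolerate NULL */
	struct ice_ring_stats *stats = READ_ONCE(vsi_stat->tx_ring_stats[i]);

	if (stats)
		pkts = stats->stats.pkts;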
@@ -461,6 +558,10 @@ static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
if (!pf->vsi_stats)
return -ENOENT;
+ if (pf->vsi_stats[vsi->idx])
+ /* realloc will happen in rebuild path */
+ return 0;
+
vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
if (!vsi_stat)
return -ENOMEM;
@@ -491,128 +592,92 @@ err_alloc_tx:
}
/**
- * ice_vsi_alloc - Allocates the next available struct VSI in the PF
- * @pf: board private structure
- * @vsi_type: type of VSI
+ * ice_vsi_alloc_def - set default values for already allocated VSI
+ * @vsi: ptr to VSI
* @ch: ptr to channel
- * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL
- *
- * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL,
- * it may be NULL in the case there is no association with a VF. For
- * ICE_VSI_VF the VF pointer *must not* be NULL.
- *
- * returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
- struct ice_channel *ch, struct ice_vf *vf)
+static int
+ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_vsi *vsi = NULL;
-
- if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
- return NULL;
-
- /* Need to protect the allocation of the VSIs at the PF level */
- mutex_lock(&pf->sw_mutex);
-
- /* If we have already allocated our maximum number of VSIs,
- * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
- * is available to be populated
- */
- if (pf->next_vsi == ICE_NO_VSI) {
- dev_dbg(dev, "out of VSI slots!\n");
- goto unlock_pf;
+ if (vsi->type != ICE_VSI_CHNL) {
+ ice_vsi_set_num_qs(vsi);
+ if (ice_vsi_alloc_arrays(vsi))
+ return -ENOMEM;
}
- vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
- if (!vsi)
- goto unlock_pf;
-
- vsi->type = vsi_type;
- vsi->back = pf;
- set_bit(ICE_VSI_DOWN, vsi->state);
-
- if (vsi_type == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf);
- else if (vsi_type != ICE_VSI_CHNL)
- ice_vsi_set_num_qs(vsi, NULL);
-
switch (vsi->type) {
case ICE_VSI_SWITCHDEV_CTRL:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup eswitch MSIX irq handler for VSI */
vsi->irq_handler = ice_eswitch_msix_clean_rings;
break;
case ICE_VSI_PF:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
case ICE_VSI_CTRL:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup ctrl VSI MSIX irq handler */
vsi->irq_handler = ice_msix_clean_ctrl_vsi;
-
- /* For the PF control VSI this is NULL, for the VF control VSI
- * this will be the first VF to allocate it.
- */
- vsi->vf = vf;
- break;
- case ICE_VSI_VF:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
- vsi->vf = vf;
break;
case ICE_VSI_CHNL:
if (!ch)
- goto err_rings;
+ return -EINVAL;
+
vsi->num_rxq = ch->num_rxq;
vsi->num_txq = ch->num_txq;
vsi->next_base_q = ch->base_q;
break;
- case ICE_VSI_LB:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
+ case ICE_VSI_VF:
break;
default:
- dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
- goto unlock_pf;
+ ice_vsi_free_arrays(vsi);
+ return -EINVAL;
}
- if (vsi->type == ICE_VSI_CTRL && !vf) {
- /* Use the last VSI slot as the index for PF control VSI */
- vsi->idx = pf->num_alloc_vsi - 1;
- pf->ctrl_vsi_idx = vsi->idx;
- pf->vsi[vsi->idx] = vsi;
- } else {
- /* fill slot and make note of the index */
- vsi->idx = pf->next_vsi;
- pf->vsi[pf->next_vsi] = vsi;
+ return 0;
+}
- /* prepare pf->next_vsi for next use */
- pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
- pf->next_vsi);
+/**
+ * ice_vsi_alloc - Allocates the next available struct VSI in the PF
+ * @pf: board private structure
+ *
+ * Reserves a VSI index from the PF and allocates an empty VSI structure
+ * without a type. The VSI structure must later be initialized by calling
+ * ice_vsi_cfg().
+ *
+ * returns a pointer to a VSI on success, NULL on failure.
+ */
+static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi = NULL;
+
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->sw_mutex);
+
+ /* If we have already allocated our maximum number of VSIs,
+ * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
+ * is available to be populated
+ */
+ if (pf->next_vsi == ICE_NO_VSI) {
+ dev_dbg(dev, "out of VSI slots!\n");
+ goto unlock_pf;
}
- if (vsi->type == ICE_VSI_CTRL && vf)
- vf->ctrl_vsi_idx = vsi->idx;
+ vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
+ if (!vsi)
+ goto unlock_pf;
- /* allocate memory for Tx/Rx ring stat pointers */
- if (ice_vsi_alloc_stat_arrays(vsi))
- goto err_rings;
+ vsi->back = pf;
+ set_bit(ICE_VSI_DOWN, vsi->state);
- goto unlock_pf;
+ /* fill slot and make note of the index */
+ vsi->idx = pf->next_vsi;
+ pf->vsi[pf->next_vsi] = vsi;
+
+ /* prepare pf->next_vsi for next use */
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
-err_rings:
- devm_kfree(dev, vsi);
- vsi = NULL;
unlock_pf:
mutex_unlock(&pf->sw_mutex);
return vsi;
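
With the type-specific work factored out, VSI construction becomes a
two-step alloc-then-configure sequence. A hedged sketch of the pairing
(both helpers are static, so this is the in-file shape that the reworked
ice_vsi_setup() below wraps):

	struct ice_vsi_cfg_params params = {
		.type	= ICE_VSI_PF,
		.pi	= pf->hw.port_info,
		.flags	= ICE_VSI_FLAG_INIT,
	};
	struct ice_vsi *vsi;

	vsi = ice_vsi_alloc(pf);	/* reserves a pf->vsi[] slot, no type yet */
	if (vsi && ice_vsi_cfg(vsi, &params))
		ice_vsi_free(vsi);	/* releases the slot on failure */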
@@ -1177,12 +1242,15 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
- * @init_vsi: is this call creating a VSI
+ * @vsi_flags: VSI configuration flags
+ *
+ * Set ICE_VSI_FLAG_INIT to initialize a new VSI context, clear it to
+ * reconfigure an existing context.
*
* This initializes a VSI context depending on the VSI type to be added and
* passes it down to the add_vsi aq command to create a new VSI.
*/
-static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
+static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -1244,7 +1312,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
/* if updating VSI context, make sure to set valid_section:
* to indicate which section of VSI context being updated
*/
- if (!init_vsi)
+ if (!(vsi_flags & ICE_VSI_FLAG_INIT))
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
}
@@ -1257,7 +1325,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
if (ret)
goto out;
- if (!init_vsi) /* means VSI being updated */
+ if (!(vsi_flags & ICE_VSI_FLAG_INIT))
+ /* means VSI being updated */
/* must indicate which sections of the VSI context are
* being modified
*/
@@ -1272,7 +1341,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
}
- if (init_vsi) {
+ if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(dev, "Add VSI failed, err %d\n", ret);
@@ -1436,7 +1505,7 @@ static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
- * This should only be called after ice_vsi_alloc() which allocates the
+ * This should only be called after ice_vsi_alloc_def() which allocates the
* corresponding SW VSI structure and initializes num_queue_pairs for the
* newly allocated VSI.
*
@@ -1584,106 +1653,6 @@ err_out:
}
/**
- * ice_vsi_free_stats - Free the ring statistics structures
- * @vsi: VSI pointer
- */
-static void ice_vsi_free_stats(struct ice_vsi *vsi)
-{
- struct ice_vsi_stats *vsi_stat;
- struct ice_pf *pf = vsi->back;
- int i;
-
- if (vsi->type == ICE_VSI_CHNL)
- return;
- if (!pf->vsi_stats)
- return;
-
- vsi_stat = pf->vsi_stats[vsi->idx];
- if (!vsi_stat)
- return;
-
- ice_for_each_alloc_txq(vsi, i) {
- if (vsi_stat->tx_ring_stats[i]) {
- kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
- WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
- }
- }
-
- ice_for_each_alloc_rxq(vsi, i) {
- if (vsi_stat->rx_ring_stats[i]) {
- kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
- WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
- }
- }
-
- kfree(vsi_stat->tx_ring_stats);
- kfree(vsi_stat->rx_ring_stats);
- kfree(vsi_stat);
- pf->vsi_stats[vsi->idx] = NULL;
-}
-
-/**
- * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
- * @vsi: VSI which is having stats allocated
- */
-static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
-{
- struct ice_ring_stats **tx_ring_stats;
- struct ice_ring_stats **rx_ring_stats;
- struct ice_vsi_stats *vsi_stats;
- struct ice_pf *pf = vsi->back;
- u16 i;
-
- vsi_stats = pf->vsi_stats[vsi->idx];
- tx_ring_stats = vsi_stats->tx_ring_stats;
- rx_ring_stats = vsi_stats->rx_ring_stats;
-
- /* Allocate Tx ring stats */
- ice_for_each_alloc_txq(vsi, i) {
- struct ice_ring_stats *ring_stats;
- struct ice_tx_ring *ring;
-
- ring = vsi->tx_rings[i];
- ring_stats = tx_ring_stats[i];
-
- if (!ring_stats) {
- ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
- if (!ring_stats)
- goto err_out;
-
- WRITE_ONCE(tx_ring_stats[i], ring_stats);
- }
-
- ring->ring_stats = ring_stats;
- }
-
- /* Allocate Rx ring stats */
- ice_for_each_alloc_rxq(vsi, i) {
- struct ice_ring_stats *ring_stats;
- struct ice_rx_ring *ring;
-
- ring = vsi->rx_rings[i];
- ring_stats = rx_ring_stats[i];
-
- if (!ring_stats) {
- ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
- if (!ring_stats)
- goto err_out;
-
- WRITE_ONCE(rx_ring_stats[i], ring_stats);
- }
-
- ring->ring_stats = ring_stats;
- }
-
- return 0;
-
-err_out:
- ice_vsi_free_stats(vsi);
- return -ENOMEM;
-}
-
-/**
* ice_vsi_manage_rss_lut - disable/enable RSS
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
@@ -2641,54 +2610,97 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
}
/**
- * ice_vsi_setup - Set up a VSI by a given type
- * @pf: board private structure
- * @pi: pointer to the port_info instance
- * @vsi_type: VSI type
- * @vf: pointer to VF to which this VSI connects. This field is used primarily
- * for the ICE_VSI_VF type. Other VSI types should pass NULL.
- * @ch: ptr to channel
- *
- * This allocates the sw VSI structure and its queue resources.
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource
+ * @pf: pointer to PF structure
+ * @vsi: the VSI to free resources for
*
- * Returns pointer to the successfully allocated and configured VSI sw struct on
- * success, NULL on failure.
+ * Check if the VF control VSI resource is still in use. If no VF is using it
+ * any more, release the VSI resource. Otherwise, leave it to be cleaned up
+ * once no other VF uses it.
*/
-struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, struct ice_vf *vf,
- struct ice_channel *ch)
+static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+
+ /* No other VFs left that have control VSI. It is now safe to reclaim
+ * SW interrupts back to the common pool.
+ */
+ ice_free_res(pf->irq_tracker, vsi->base_vector,
+ ICE_RES_VF_CTRL_VEC_ID);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+}
+
+static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
- struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_CHNL)
- vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL);
- else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf);
- else
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL);
+ /* configure VSI nodes based on number of queues and TC's */
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i)))
+ continue;
- if (!vsi) {
- dev_err(dev, "could not allocate VSI\n");
- return NULL;
+ if (vsi->type == ICE_VSI_CHNL) {
+ if (!vsi->alloc_txq && vsi->num_txq)
+ max_txqs[i] = vsi->num_txq;
+ else
+ max_txqs[i] = pf->num_lan_tx;
+ } else {
+ max_txqs[i] = vsi->alloc_txq;
+ }
}
- vsi->port_info = pi;
+ dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_err(dev, "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_cfg_def - configure default VSI based on the type
+ * @vsi: pointer to VSI
+ * @params: the parameters to configure this VSI with
+ */
+static int
+ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_pf *pf = vsi->back;
+ int ret;
+
vsi->vsw = pf->first_sw;
- if (vsi->type == ICE_VSI_PF)
- vsi->ethtype = ETH_P_PAUSE;
+
+ ret = ice_vsi_alloc_def(vsi, params->ch);
+ if (ret)
+ return ret;
+
+ /* allocate memory for Tx/Rx ring stat pointers */
+ if (ice_vsi_alloc_stat_arrays(vsi))
+ goto unroll_vsi_alloc;
ice_alloc_fd_res(vsi);
- if (vsi_type != ICE_VSI_CHNL) {
- if (ice_vsi_get_qs(vsi)) {
- dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
- vsi->idx);
- goto unroll_vsi_alloc;
- }
+ if (ice_vsi_get_qs(vsi)) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto unroll_vsi_alloc_stat;
}
/* set RSS capabilities */
@@ -2698,7 +2710,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
- ret = ice_vsi_init(vsi, true);
+ ret = ice_vsi_init(vsi, params->flags);
if (ret)
goto unroll_get_qs;
@@ -2729,6 +2741,14 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ ret = ice_vsi_determine_xdp_res(vsi);
+ if (ret)
+ goto unroll_vector_base;
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ if (ret)
+ goto unroll_vector_base;
+ }
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
if (vsi->type != ICE_VSI_CTRL)
@@ -2793,30 +2813,156 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_vsi_init;
}
- /* configure VSI nodes based on number of queues and TC's */
- ice_for_each_traffic_class(i) {
- if (!(vsi->tc_cfg.ena_tc & BIT(i)))
- continue;
+ return 0;
- if (vsi->type == ICE_VSI_CHNL) {
- if (!vsi->alloc_txq && vsi->num_txq)
- max_txqs[i] = vsi->num_txq;
- else
- max_txqs[i] = pf->num_lan_tx;
+unroll_vector_base:
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+unroll_alloc_q_vector:
+ ice_vsi_free_q_vectors(vsi);
+unroll_vsi_init:
+ ice_vsi_delete_from_hw(vsi);
+unroll_get_qs:
+ ice_vsi_put_qs(vsi);
+unroll_vsi_alloc_stat:
+ ice_vsi_free_stats(vsi);
+unroll_vsi_alloc:
+ ice_vsi_free_arrays(vsi);
+ return ret;
+}
+
+/**
+ * ice_vsi_cfg - configure a previously allocated VSI
+ * @vsi: pointer to VSI
+ * @params: parameters used to configure this VSI
+ */
+int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+{
+ struct ice_pf *pf = vsi->back;
+ int ret;
+
+ if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
+ return -EINVAL;
+
+ vsi->type = params->type;
+ vsi->port_info = params->pi;
+
+ /* For VSIs which don't have a connected VF, this will be NULL */
+ vsi->vf = params->vf;
+
+ ret = ice_vsi_cfg_def(vsi, params);
+ if (ret)
+ return ret;
+
+ ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
+ if (ret)
+ ice_vsi_decfg(vsi);
+
+ if (vsi->type == ICE_VSI_CTRL) {
+ if (vsi->vf) {
+ WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
+ vsi->vf->ctrl_vsi_idx = vsi->idx;
} else {
- max_txqs[i] = vsi->alloc_txq;
+ WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
+ pf->ctrl_vsi_idx = vsi->idx;
}
}
- dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (ret) {
- dev_err(dev, "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, ret);
- goto unroll_clear_rings;
+ return ret;
+}
+
+/**
+ * ice_vsi_decfg - remove all VSI configuration
+ * @vsi: pointer to VSI
+ */
+void ice_vsi_decfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ /* The Rx rule will only exist to remove if the LLDP FW
+ * engine is currently stopped
+ */
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, false);
+
+ ice_fltr_remove_all(vsi);
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, err);
+
+ if (ice_is_xdp_ena_vsi(vsi))
+ /* return value check can be skipped here, it always returns
+ * 0 if reset is in progress
+ */
+ ice_destroy_xdp_rings(vsi);
+
+ ice_vsi_clear_rings(vsi);
+ ice_vsi_free_q_vectors(vsi);
+ ice_vsi_put_qs(vsi);
+ ice_vsi_free_arrays(vsi);
+
+ /* SR-IOV determines needed MSIX resources all at once instead of per
+ * VSI since when VFs are spawned we know how many VFs there are and how
+ * many interrupts each VF needs. SR-IOV MSIX resources are also
+ * cleared in the same manner.
+ */
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ ice_free_vf_ctrl_res(pf, vsi);
+ } else if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ vsi->base_vector = 0;
+ }
+
+ if (vsi->type == ICE_VSI_VF &&
+ vsi->agg_node && vsi->agg_node->valid)
+ vsi->agg_node->num_vsis--;
+ if (vsi->agg_node) {
+ vsi->agg_node->valid = false;
+ vsi->agg_node->agg_id = 0;
+ }
+}
+
+/**
+ * ice_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @params: parameters to use when creating the VSI
+ *
+ * This allocates the sw VSI structure and its queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct on
+ * success, NULL on failure.
+ */
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi;
+ int ret;
+
+ /* ice_vsi_setup can only initialize a new VSI, and we must have
+ * a port_info structure for it.
+ */
+ if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
+ WARN_ON(!params->pi))
+ return NULL;
+
+ vsi = ice_vsi_alloc(pf);
+ if (!vsi) {
+ dev_err(dev, "could not allocate VSI\n");
+ return NULL;
}
+ ret = ice_vsi_cfg(vsi, params);
+ if (ret)
+ goto err_vsi_cfg;
+
/* Add switch rule to drop all Tx Flow Control Frames, of lookup
* type ETHERTYPE from VSIs, and restrict malicious VF from sending
* out PAUSE or PFC frames. If enabled, FW can still send FC frames.
@@ -2826,34 +2972,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* be dropped so that VFs cannot send LLDP packets to reconfig DCB
* settings in the HW.
*/
- if (!ice_is_safe_mode(pf))
- if (vsi->type == ICE_VSI_PF) {
- ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
- ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, true);
- }
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
+ ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
+ ice_cfg_sw_lldp(vsi, true, true);
+ }
if (!vsi->agg_node)
ice_set_agg_vsi(vsi);
+
return vsi;
-unroll_clear_rings:
- ice_vsi_clear_rings(vsi);
-unroll_vector_base:
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
-unroll_alloc_q_vector:
- ice_vsi_free_q_vectors(vsi);
-unroll_vsi_init:
- ice_vsi_free_stats(vsi);
- ice_vsi_delete(vsi);
-unroll_get_qs:
- ice_vsi_put_qs(vsi);
-unroll_vsi_alloc:
- if (vsi_type == ICE_VSI_VF)
+err_vsi_cfg:
+ if (params->type == ICE_VSI_VF)
ice_enable_lag(pf->lag);
- ice_vsi_clear(vsi);
+ ice_vsi_free(vsi);
return NULL;
}
@@ -3117,37 +3250,6 @@ void ice_napi_del(struct ice_vsi *vsi)
}
/**
- * ice_free_vf_ctrl_res - Free the VF control VSI resource
- * @pf: pointer to PF structure
- * @vsi: the VSI to free resources for
- *
- * Check if the VF control VSI resource is still in use. If no VF is using it
- * any more, release the VSI resource. Otherwise, leave it to be cleaned up
- * once no other VF uses it.
- */
-static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
-{
- struct ice_vf *vf;
- unsigned int bkt;
-
- rcu_read_lock();
- ice_for_each_vf_rcu(pf, bkt, vf) {
- if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
- rcu_read_unlock();
- return;
- }
- }
- rcu_read_unlock();
-
- /* No other VFs left that have control VSI. It is now safe to reclaim
- * SW interrupts back to the common pool.
- */
- ice_free_res(pf->irq_tracker, vsi->base_vector,
- ICE_RES_VF_CTRL_VEC_ID);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
-}
-
-/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -3156,7 +3258,6 @@ static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
int ice_vsi_release(struct ice_vsi *vsi)
{
struct ice_pf *pf;
- int err;
if (!vsi->back)
return -ENODEV;
@@ -3174,50 +3275,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
+
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
- /* Disable VSI and free resources */
- if (vsi->type != ICE_VSI_LB)
- ice_vsi_dis_irq(vsi);
ice_vsi_close(vsi);
-
- /* SR-IOV determines needed MSIX resources all at once instead of per
- * VSI since when VFs are spawned we know how many VFs there are and how
- * many interrupts each VF needs. SR-IOV MSIX resources are also
- * cleared in the same manner.
- */
- if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
- ice_free_vf_ctrl_res(pf, vsi);
- } else if (vsi->type != ICE_VSI_VF) {
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- }
-
- if (!ice_is_safe_mode(pf)) {
- if (vsi->type == ICE_VSI_PF) {
- ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
- ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, false);
- /* The Rx rule will only exist to remove if the LLDP FW
- * engine is currently stopped
- */
- if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
- }
- }
-
- if (ice_is_vsi_dflt_vsi(vsi))
- ice_clear_dflt_vsi(vsi);
- ice_fltr_remove_all(vsi);
- ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
- err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
- if (err)
- dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
- vsi->vsi_num, err);
- ice_vsi_delete(vsi);
- ice_vsi_free_q_vectors(vsi);
+ ice_vsi_decfg(vsi);
if (vsi->netdev) {
if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
@@ -3231,19 +3296,12 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- if (vsi->type == ICE_VSI_VF &&
- vsi->agg_node && vsi->agg_node->valid)
- vsi->agg_node->num_vsis--;
- ice_vsi_clear_rings(vsi);
- ice_vsi_free_stats(vsi);
- ice_vsi_put_qs(vsi);
-
/* retain SW VSI data structure since it is needed to unregister and
* free VSI netdev when PF is not in reset recovery pending state,
* for example during rmmod.
*/
if (!ice_is_reset_in_progress(pf->state))
- ice_vsi_clear(vsi);
+ ice_vsi_delete(vsi);
return 0;
}
@@ -3406,29 +3464,31 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuilt
- * @init_vsi: is this an initialization or a reconfigure of the VSI
+ * @vsi_flags: flags used for VSI rebuild flow
+ *
+ * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
+ * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
*
* Returns 0 on success and negative value on failure
*/
-int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
+int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
- u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
- int ret, i, prev_txq, prev_rxq;
+ int ret, prev_txq, prev_rxq;
int prev_num_q_vectors = 0;
- enum ice_vsi_type vtype;
struct ice_pf *pf;
if (!vsi)
return -EINVAL;
+ params = ice_vsi_to_params(vsi);
+ params.flags = vsi_flags;
+
pf = vsi->back;
- vtype = vsi->type;
- if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
- ice_vsi_init_vlan_ops(vsi);
-
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
if (!coalesce)
@@ -3439,188 +3499,33 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
prev_txq = vsi->num_txq;
prev_rxq = vsi->num_rxq;
- ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
- ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ ice_vsi_decfg(vsi);
+ ret = ice_vsi_cfg_def(vsi, &params);
if (ret)
- dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
- vsi->vsi_num, ret);
- ice_vsi_free_q_vectors(vsi);
-
- /* SR-IOV determines needed MSIX resources all at once instead of per
- * VSI since when VFs are spawned we know how many VFs there are and how
- * many interrupts each VF needs. SR-IOV MSIX resources are also
- * cleared in the same manner.
- */
- if (vtype != ICE_VSI_VF) {
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- vsi->base_vector = 0;
- }
-
- if (ice_is_xdp_ena_vsi(vsi))
- /* return value check can be skipped here, it always returns
- * 0 if reset is in progress
- */
- ice_destroy_xdp_rings(vsi);
- ice_vsi_put_qs(vsi);
- ice_vsi_clear_rings(vsi);
- ice_vsi_free_arrays(vsi);
- if (vtype == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vsi->vf);
- else
- ice_vsi_set_num_qs(vsi, NULL);
-
- ret = ice_vsi_alloc_arrays(vsi);
- if (ret < 0)
- goto err_vsi;
-
- ice_vsi_get_qs(vsi);
-
- ice_alloc_fd_res(vsi);
- ice_vsi_set_tc_cfg(vsi);
-
- /* Initialize VSI struct elements and create VSI in FW */
- ret = ice_vsi_init(vsi, init_vsi);
- if (ret < 0)
- goto err_vsi;
-
- switch (vtype) {
- case ICE_VSI_CTRL:
- case ICE_VSI_SWITCHDEV_CTRL:
- case ICE_VSI_PF:
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_set_q_vectors_reg_idx(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_ring_stats(vsi);
- if (ret)
- goto err_vectors;
-
- ice_vsi_map_rings_to_vectors(vsi);
-
- vsi->stat_offsets_loaded = false;
- if (ice_is_xdp_ena_vsi(vsi)) {
- ret = ice_vsi_determine_xdp_res(vsi);
- if (ret)
- goto err_vectors;
- ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
- if (ret)
- goto err_vectors;
- }
- /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
- if (vtype != ICE_VSI_CTRL)
- /* Do not exit if configuring RSS had an issue, at
- * least receive traffic on first queue. Hence no
- * need to capture return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_vsi_cfg_rss_lut_key(vsi);
-
- /* disable or enable CRC stripping */
- if (vsi->netdev)
- ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features &
- NETIF_F_RXFCS));
-
- break;
- case ICE_VSI_VF:
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_set_q_vectors_reg_idx(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_ring_stats(vsi);
- if (ret)
- goto err_vectors;
-
- vsi->stat_offsets_loaded = false;
- break;
- case ICE_VSI_CHNL:
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- ice_vsi_cfg_rss_lut_key(vsi);
- ice_vsi_set_rss_flow_fld(vsi);
- }
- break;
- default:
- break;
- }
-
- /* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++) {
- /* configure VSI nodes based on number of queues and TC's.
- * ADQ creates VSIs for each TC/Channel but doesn't
- * allocate queues instead it reconfigures the PF queues
- * as per the TC command. So max_txqs should point to the
- * PF Tx queues.
- */
- if (vtype == ICE_VSI_CHNL)
- max_txqs[i] = pf->num_lan_tx;
- else
- max_txqs[i] = vsi->alloc_txq;
-
- if (ice_is_xdp_ena_vsi(vsi))
- max_txqs[i] += vsi->num_xdp_txq;
- }
-
- if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- /* If MQPRIO is set, means channel code path, hence for main
- * VSI's, use TC as 1
- */
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
- else
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ goto err_vsi_cfg;
+ ret = ice_vsi_cfg_tc_lan(pf, vsi);
if (ret) {
- dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, ret);
- if (init_vsi) {
+ if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = -EIO;
- goto err_vectors;
+ goto err_vsi_cfg_tc_lan;
} else {
+ kfree(coalesce);
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
}
if (ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq))
- goto err_vectors;
+ goto err_vsi_cfg_tc_lan;
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
return 0;
-err_vectors:
- ice_vsi_free_q_vectors(vsi);
-err_rings:
- if (vsi->netdev) {
- vsi->current_netdev_flags = 0;
- unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
-err_vsi:
- ice_vsi_clear(vsi);
- set_bit(ICE_RESET_FAILED, pf->state);
+err_vsi_cfg_tc_lan:
+ ice_vsi_decfg(vsi);
+err_vsi_cfg:
kfree(coalesce);
return ret;
}
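
A hedged caller sketch for the reworked rebuild, using the flag names
defined in ice_lib.h below:

	/* PF reset: keep the HW VSI context, re-derive only SW state */
	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);

	/* full resets that lose the HW context re-add the VSI from scratch */
	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);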
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index dcdf69a693e9..75221478f2dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -7,6 +7,47 @@
#include "ice.h"
#include "ice_vlan.h"
+/* Flags used for VSI configuration and rebuild */
+#define ICE_VSI_FLAG_INIT BIT(0)
+#define ICE_VSI_FLAG_NO_INIT 0
+
+/**
+ * struct ice_vsi_cfg_params - VSI configuration parameters
+ * @pi: pointer to the port_info instance for the VSI
+ * @ch: pointer to the channel structure for the VSI, may be NULL
+ * @vf: pointer to the VF associated with this VSI, may be NULL
+ * @type: the type of VSI to configure
+ * @flags: VSI flags used for rebuild and configuration
+ *
+ * Parameter structure used when configuring a new VSI.
+ */
+struct ice_vsi_cfg_params {
+ struct ice_port_info *pi;
+ struct ice_channel *ch;
+ struct ice_vf *vf;
+ enum ice_vsi_type type;
+ u32 flags;
+};
+
+/**
+ * ice_vsi_to_params - Get parameters for an existing VSI
+ * @vsi: the VSI to get parameters for
+ *
+ * Fill a parameter structure for reconfiguring a VSI with its current
+ * parameters, such as during a rebuild operation.
+ */
+static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi)
+{
+ struct ice_vsi_cfg_params params = {};
+
+ params.pi = vsi->port_info;
+ params.ch = vsi->ch;
+ params.vf = vsi->vf;
+ params.type = vsi->type;
+
+ return params;
+}
+
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
@@ -42,7 +83,6 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
void ice_vsi_delete(struct ice_vsi *vsi);
-int ice_vsi_clear(struct ice_vsi *vsi);
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
@@ -51,9 +91,7 @@ int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, struct ice_vf *vf,
- struct ice_channel *ch);
+ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
void ice_napi_del(struct ice_vsi *vsi);
@@ -63,6 +101,7 @@ void ice_vsi_close(struct ice_vsi *vsi);
int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
+void ice_vsi_decfg(struct ice_vsi *vsi);
void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
@@ -70,7 +109,8 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
-int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
+int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
+int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
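
A hedged usage sketch for the helper above, mirroring what
ice_vsi_rebuild() now does internally: clone the live parameters, adjust
only the flags, and reconfigure in place.

	struct ice_vsi_cfg_params params = ice_vsi_to_params(vsi);

	params.flags = ICE_VSI_FLAG_NO_INIT;	/* keep the HW context */
	err = ice_vsi_cfg(vsi, &params);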
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 074b0e6d0e2d..0712c1055aea 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -45,7 +45,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
-static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
@@ -539,7 +538,7 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
/* Disable VFs until reset is completed */
mutex_lock(&pf->vfs.table_lock);
ice_for_each_vf(pf, bkt, vf)
- ice_set_vf_state_qs_dis(vf);
+ ice_set_vf_state_dis(vf);
mutex_unlock(&pf->vfs.table_lock);
if (ice_is_eswitch_mode_switchdev(pf)) {
@@ -3440,55 +3439,6 @@ static void ice_set_netdev_features(struct net_device *netdev)
}
/**
- * ice_cfg_netdev - Allocate, configure and register a netdev
- * @vsi: the VSI associated with the new netdev
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_cfg_netdev(struct ice_vsi *vsi)
-{
- struct ice_netdev_priv *np;
- struct net_device *netdev;
- u8 mac_addr[ETH_ALEN];
-
- netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
- vsi->alloc_rxq);
- if (!netdev)
- return -ENOMEM;
-
- set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- vsi->netdev = netdev;
- np = netdev_priv(netdev);
- np->vsi = vsi;
-
- ice_set_netdev_features(netdev);
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_XSK_ZEROCOPY;
-
- ice_set_ops(netdev);
-
- if (vsi->type == ICE_VSI_PF) {
- SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
- ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
- eth_hw_addr_set(netdev, mac_addr);
- ether_addr_copy(netdev->perm_addr, mac_addr);
- }
-
- netdev->priv_flags |= IFF_UNICAST_FLT;
-
- /* Setup netdev TC information */
- ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
-
- /* setup watchdog timeout value to be 5 second */
- netdev->watchdog_timeo = 5 * HZ;
-
- netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = ICE_MAX_MTU;
-
- return 0;
-}
-
-/**
* ice_fill_rss_lut - Fill the RSS lookup table with default values
* @lut: Lookup table
* @rss_table_size: Lookup table size
@@ -3513,14 +3463,27 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_PF;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_channel *ch)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_CHNL;
+ params.pi = pi;
+ params.ch = ch;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3534,7 +3497,13 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_CTRL;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3548,7 +3517,13 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_LB;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3708,20 +3683,6 @@ static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
}
/**
- * ice_tc_indir_block_remove - clean indirect TC block notifications
- * @pf: PF structure
- */
-static void ice_tc_indir_block_remove(struct ice_pf *pf)
-{
- struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
- if (!pf_vsi)
- return;
-
- ice_tc_indir_block_unregister(pf_vsi);
-}
-
-/**
* ice_tc_indir_block_register - Register TC indirect block notifications
* @vsi: VSI struct which has the netdev
*
@@ -3741,76 +3702,6 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi)
}
/**
- * ice_setup_pf_sw - Setup the HW switch on startup or after reset
- * @pf: board private structure
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_setup_pf_sw(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- bool dvm = ice_is_dvm_ena(&pf->hw);
- struct ice_vsi *vsi;
- int status;
-
- if (ice_is_reset_in_progress(pf->state))
- return -EBUSY;
-
- status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
- if (status)
- return -EIO;
-
- vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
- if (!vsi)
- return -ENOMEM;
-
- /* init channel list */
- INIT_LIST_HEAD(&vsi->ch_list);
-
- status = ice_cfg_netdev(vsi);
- if (status)
- goto unroll_vsi_setup;
- /* netdev has to be configured before setting frame size */
- ice_vsi_cfg_frame_size(vsi);
-
- /* init indirect block notifications */
- status = ice_tc_indir_block_register(vsi);
- if (status) {
- dev_err(dev, "Failed to register netdev notifier\n");
- goto unroll_cfg_netdev;
- }
-
- /* Setup DCB netlink interface */
- ice_dcbnl_setup(vsi);
-
- /* registering the NAPI handler requires both the queues and
- * netdev to be created, which are done in ice_pf_vsi_setup()
- * and ice_cfg_netdev() respectively
- */
- ice_napi_add(vsi);
-
- status = ice_init_mac_fltr(pf);
- if (status)
- goto unroll_napi_add;
-
- return 0;
-
-unroll_napi_add:
- ice_tc_indir_block_unregister(vsi);
-unroll_cfg_netdev:
- ice_napi_del(vsi);
- if (vsi->netdev) {
- clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
-
-unroll_vsi_setup:
- ice_vsi_release(vsi);
- return status;
-}
-
-/**
* ice_get_avail_q_count - Get count of queues in use
* @pf_qmap: bitmap to get queue use count from
* @lock: pointer to a mutex that protects access to pf_qmap
@@ -4211,12 +4102,13 @@ bool ice_is_wol_supported(struct ice_hw *hw)
* @vsi: VSI being changed
* @new_rx: new number of Rx queues
* @new_tx: new number of Tx queues
+ * @locked: is adev device_lock held
*
 * Only change the number of queues if new_tx or new_rx is non-zero.
*
* Returns 0 on success.
*/
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
struct ice_pf *pf = vsi->back;
int err = 0, timeout = 50;
@@ -4238,14 +4130,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- ice_vsi_rebuild(vsi, false);
+ ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
goto done;
}
ice_vsi_close(vsi);
- ice_vsi_rebuild(vsi, false);
- ice_pf_dcb_recfg(pf);
+ ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
done:
clear_bit(ICE_CFG_BUSY, pf->state);
@@ -4507,6 +4399,23 @@ err_vsi_open:
return err;
}
+static void ice_deinit_fdir(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
+
+ if (!vsi)
+ return;
+
+ ice_vsi_manage_fdir(vsi, false);
+ ice_vsi_release(vsi);
+ if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
+ pf->vsi[pf->ctrl_vsi_idx] = NULL;
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ }
+
+ mutex_destroy(&pf->hw.fdir_fltr_lock);
+}
+
/**
* ice_get_opt_fw_name - return optional firmware file name or NULL
* @pf: pointer to the PF instance
@@ -4607,116 +4516,173 @@ static void ice_print_wake_reason(struct ice_pf *pf)
/**
* ice_register_netdev - register netdev
- * @pf: pointer to the PF struct
+ * @vsi: pointer to the VSI struct
*/
-static int ice_register_netdev(struct ice_pf *pf)
+static int ice_register_netdev(struct ice_vsi *vsi)
{
- struct ice_vsi *vsi;
- int err = 0;
+ int err;
- vsi = ice_get_main_vsi(pf);
if (!vsi || !vsi->netdev)
return -EIO;
err = register_netdev(vsi->netdev);
if (err)
- goto err_register_netdev;
+ return err;
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
return 0;
-err_register_netdev:
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- return err;
+}
+
+static void ice_unregister_netdev(struct ice_vsi *vsi)
+{
+ if (!vsi || !vsi->netdev)
+ return;
+
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
/**
- * ice_probe - Device initialization routine
- * @pdev: PCI device information struct
- * @ent: entry in ice_pci_tbl
+ * ice_cfg_netdev - Allocate, configure and register a netdev
+ * @vsi: the VSI associated with the new netdev
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative value on failure
*/
-static int
-ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+static int ice_cfg_netdev(struct ice_vsi *vsi)
{
- struct device *dev = &pdev->dev;
- struct ice_vsi *vsi;
- struct ice_pf *pf;
- struct ice_hw *hw;
- int i, err;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
- if (pdev->is_virtfn) {
- dev_err(dev, "can't probe a virtual function\n");
- return -EINVAL;
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ ice_set_netdev_features(netdev);
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ ice_set_ops(netdev);
+
+ if (vsi->type == ICE_VSI_PF) {
+ SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
+ ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
+ eth_hw_addr_set(netdev, mac_addr);
}
- /* this driver uses devres, see
- * Documentation/driver-api/driver-model/devres.rst
- */
- err = pcim_enable_device(pdev);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Setup netdev TC information */
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
+ netdev->max_mtu = ICE_MAX_MTU;
+
+ return 0;
+}
+
+static void ice_decfg_netdev(struct ice_vsi *vsi)
+{
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+}
+
+static int ice_start_eth(struct ice_vsi *vsi)
+{
+ int err;
+
+ err = ice_init_mac_fltr(vsi->back);
if (err)
return err;
- err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
- if (err) {
- dev_err(dev, "BAR0 I/O map error %d\n", err);
- return err;
- }
+ rtnl_lock();
+ err = ice_vsi_open(vsi);
+ rtnl_unlock();
- pf = ice_allocate_pf(dev);
- if (!pf)
- return -ENOMEM;
+ return err;
+}
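Note that ice_start_eth() gets no symmetric helper in this patch; ice_unload() below calls ice_vsi_close() directly. A hypothetical stop-side counterpart (ice_stop_eth() is not defined here and is sketched only to show the pairing) might be:

	static void ice_stop_eth(struct ice_vsi *vsi)
	{
		/* Hypothetical inverse of ice_start_eth(): drop the MAC
		 * filters installed via ice_init_mac_fltr(), then close
		 * the VSI.
		 */
		ice_fltr_remove_all(vsi);
		ice_vsi_close(vsi);
	}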
- /* initialize Auxiliary index to invalid value */
- pf->aux_idx = -1;
+static int ice_init_eth(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ int err;
- /* set up for high or low DMA */
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(dev, "DMA configuration failed: 0x%x\n", err);
+ if (!vsi)
+ return -EINVAL;
+
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
+ err = ice_cfg_netdev(vsi);
+ if (err)
return err;
- }
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
- pci_enable_pcie_error_reporting(pdev);
- pci_set_master(pdev);
+ err = ice_init_mac_fltr(pf);
+ if (err)
+ goto err_init_mac_fltr;
- pf->pdev = pdev;
- pci_set_drvdata(pdev, pf);
- set_bit(ICE_DOWN, pf->state);
- /* Disable service task until DOWN bit is cleared */
- set_bit(ICE_SERVICE_DIS, pf->state);
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create_pf_port;
- hw = &pf->hw;
- hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
- pci_save_state(pdev);
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
- hw->back = pf;
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
- hw->bus.device = PCI_SLOT(pdev->devfn);
- hw->bus.func = PCI_FUNC(pdev->devfn);
- ice_set_ctrlq_len(hw);
+ err = ice_register_netdev(vsi);
+ if (err)
+ goto err_register_netdev;
- pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+ err = ice_tc_indir_block_register(vsi);
+ if (err)
+ goto err_tc_indir_block_register;
-#ifndef CONFIG_DYNAMIC_DEBUG
- if (debug < -1)
- hw->debug_mask = debug;
-#endif
+ ice_napi_add(vsi);
+
+ return 0;
+
+err_tc_indir_block_register:
+ ice_unregister_netdev(vsi);
+err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+ ice_decfg_netdev(vsi);
+ return err;
+}
+
+static void ice_deinit_eth(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ if (!vsi)
+ return;
+
+ ice_vsi_close(vsi);
+ ice_unregister_netdev(vsi);
+ ice_devlink_destroy_pf_port(pf);
+ ice_tc_indir_block_unregister(vsi);
+ ice_decfg_netdev(vsi);
+}
+
+static int ice_init_dev(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int err;
err = ice_init_hw(hw);
if (err) {
dev_err(dev, "ice_init_hw failed: %d\n", err);
- err = -EIO;
- goto err_exit_unroll;
+ return err;
}
ice_init_feature_support(pf);
@@ -4739,62 +4705,31 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf_unroll;
+ goto err_init_pf;
}
- ice_devlink_init_regions(pf);
-
pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
- i = 0;
if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
- pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.udp_tunnel_nic.tables[0].n_entries =
pf->hw.tnl.valid_count[TNL_VXLAN];
- pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
UDP_TUNNEL_TYPE_VXLAN;
- i++;
}
if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
- pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.udp_tunnel_nic.tables[1].n_entries =
pf->hw.tnl.valid_count[TNL_GENEVE];
- pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
UDP_TUNNEL_TYPE_GENEVE;
- i++;
- }
-
- pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
- if (!pf->num_alloc_vsi) {
- err = -EIO;
- goto err_init_pf_unroll;
- }
- if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
- dev_warn(&pf->pdev->dev,
- "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
- pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
- pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
- }
-
- pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
- GFP_KERNEL);
- if (!pf->vsi) {
- err = -ENOMEM;
- goto err_init_pf_unroll;
- }
-
- pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
- sizeof(*pf->vsi_stats), GFP_KERNEL);
- if (!pf->vsi_stats) {
- err = -ENOMEM;
- goto err_init_vsi_unroll;
}
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_vsi_stats_unroll;
+ goto err_init_interrupt_scheme;
}
/* In case of MSIX we are going to setup the misc vector right here
@@ -4805,49 +4740,94 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_init_interrupt_unroll;
+ goto err_req_irq_msix_misc;
}
- /* create switch struct for the switch element created by FW on boot */
- pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
- if (!pf->first_sw) {
- err = -ENOMEM;
- goto err_msix_misc_unroll;
- }
+ return 0;
- if (hw->evb_veb)
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
- else
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
+err_req_irq_msix_misc:
+ ice_clear_interrupt_scheme(pf);
+err_init_interrupt_scheme:
+ ice_deinit_pf(pf);
+err_init_pf:
+ ice_deinit_hw(hw);
+ return err;
+}
- pf->first_sw->pf = pf;
+static void ice_deinit_dev(struct ice_pf *pf)
+{
+ ice_free_irq_msix_misc(pf);
+ ice_clear_interrupt_scheme(pf);
+ ice_deinit_pf(pf);
+ ice_deinit_hw(&pf->hw);
+}
- /* record the sw_id available for later use */
- pf->first_sw->sw_id = hw->port_info->sw_id;
+static void ice_init_features(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
- err = ice_setup_pf_sw(pf);
- if (err) {
- dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
- goto err_alloc_sw_unroll;
- }
+ if (ice_is_safe_mode(pf))
+ return;
- clear_bit(ICE_SERVICE_DIS, pf->state);
+ /* initialize DDP driven features */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_init(pf);
- /* tell the firmware we are up */
- err = ice_send_version(pf);
- if (err) {
- dev_err(dev, "probe failed sending driver version %s. error: %d\n",
- UTS_RELEASE, err);
- goto err_send_version_unroll;
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
+ /* Note: Flow director init failure is non-fatal to load */
+ if (ice_init_fdir(pf))
+ dev_err(dev, "could not initialize flow director\n");
+
+ /* Note: DCB init failure is non-fatal to load */
+ if (ice_init_pf_dcb(pf, false)) {
+ clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ } else {
+ ice_cfg_lldp_mib_change(&pf->hw, true);
}
- /* since everything is good, start the service timer */
- mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ if (ice_init_lag(pf))
+ dev_warn(dev, "Failed to init link aggregation support\n");
+}
+
+static void ice_deinit_features(struct ice_pf *pf)
+{
+ ice_deinit_lag(pf);
+ if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ ice_cfg_lldp_mib_change(&pf->hw, false);
+ ice_deinit_fdir(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_release(pf);
+}
+
+static void ice_init_wakeup(struct ice_pf *pf)
+{
+ /* Save wakeup reason register for later use */
+ pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
+
+ /* check for a power management event */
+ ice_print_wake_reason(pf);
+
+ /* clear wake status, all bits */
+ wr32(&pf->hw, PFPM_WUS, U32_MAX);
+
+ /* Disable WoL at init, wait for user to enable */
+ device_set_wakeup_enable(ice_pf_to_dev(pf), false);
+}
+
+static int ice_init_link(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_link_events failed: %d\n", err);
- goto err_send_version_unroll;
+ return err;
}
/* not a fatal error if this fails */
@@ -4883,123 +4863,350 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
}
- ice_verify_cacheline_size(pf);
+ return err;
+}
- /* Save wakeup reason register for later use */
- pf->wakeup_reason = rd32(hw, PFPM_WUS);
+static int ice_init_pf_sw(struct ice_pf *pf)
+{
+ bool dvm = ice_is_dvm_ena(&pf->hw);
+ struct ice_vsi *vsi;
+ int err;
- /* check for a power management event */
- ice_print_wake_reason(pf);
+ /* create switch struct for the switch element created by FW on boot */
+ pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
+ if (!pf->first_sw)
+ return -ENOMEM;
- /* clear wake status, all bits */
- wr32(hw, PFPM_WUS, U32_MAX);
+ if (pf->hw.evb_veb)
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
- /* Disable WoL at init, wait for user to enable */
- device_set_wakeup_enable(dev, false);
+ pf->first_sw->pf = pf;
- if (ice_is_safe_mode(pf)) {
- ice_set_safe_mode_vlan_cfg(pf);
- goto probe_done;
+ /* record the sw_id available for later use */
+ pf->first_sw->sw_id = pf->hw.port_info->sw_id;
+
+ err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (err)
+ goto err_aq_set_port_params;
+
+ vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
+ if (!vsi) {
+ err = -ENOMEM;
+ goto err_pf_vsi_setup;
}
- /* initialize DDP driven features */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_init(pf);
+ return 0;
- if (ice_is_feature_supported(pf, ICE_F_GNSS))
- ice_gnss_init(pf);
+err_pf_vsi_setup:
+err_aq_set_port_params:
+ kfree(pf->first_sw);
+ return err;
+}
- /* Note: Flow director init failure is non-fatal to load */
- if (ice_init_fdir(pf))
- dev_err(dev, "could not initialize flow director\n");
+static void ice_deinit_pf_sw(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
- /* Note: DCB init failure is non-fatal to load */
- if (ice_init_pf_dcb(pf, false)) {
- clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
- clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
- } else {
- ice_cfg_lldp_mib_change(&pf->hw, true);
+ if (!vsi)
+ return;
+
+ ice_vsi_release(vsi);
+ kfree(pf->first_sw);
+}
+
+static int ice_alloc_vsis(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
+ if (!pf->num_alloc_vsi)
+ return -EIO;
+
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
}
- if (ice_init_lag(pf))
- dev_warn(dev, "Failed to init link aggregation support\n");
+ pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
+ GFP_KERNEL);
+ if (!pf->vsi)
+ return -ENOMEM;
- /* print PCI link speed and width */
- pcie_print_link_status(pf->pdev);
+ pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
+ sizeof(*pf->vsi_stats), GFP_KERNEL);
+ if (!pf->vsi_stats) {
+ devm_kfree(dev, pf->vsi);
+ return -ENOMEM;
+ }
-probe_done:
- err = ice_devlink_create_pf_port(pf);
+ return 0;
+}
+
+static void ice_dealloc_vsis(struct ice_pf *pf)
+{
+ devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
+ pf->vsi_stats = NULL;
+
+ pf->num_alloc_vsi = 0;
+ devm_kfree(ice_pf_to_dev(pf), pf->vsi);
+ pf->vsi = NULL;
+}
+
+static int ice_init_devlink(struct ice_pf *pf)
+{
+ int err;
+
+ err = ice_devlink_register_params(pf);
if (err)
- goto err_create_pf_port;
+ return err;
- vsi = ice_get_main_vsi(pf);
- if (!vsi || !vsi->netdev) {
- err = -EINVAL;
- goto err_netdev_reg;
- }
+ ice_devlink_init_regions(pf);
+ ice_devlink_register(pf);
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+ return 0;
+}
- err = ice_register_netdev(pf);
+static void ice_deinit_devlink(struct ice_pf *pf)
+{
+ ice_devlink_unregister(pf);
+ ice_devlink_destroy_regions(pf);
+ ice_devlink_unregister_params(pf);
+}
+
+static int ice_init(struct ice_pf *pf)
+{
+ int err;
+
+ err = ice_init_dev(pf);
if (err)
- goto err_netdev_reg;
+ return err;
- err = ice_devlink_register_params(pf);
+ err = ice_alloc_vsis(pf);
if (err)
- goto err_netdev_reg;
+ goto err_alloc_vsis;
+
+ err = ice_init_pf_sw(pf);
+ if (err)
+ goto err_init_pf_sw;
+
+ ice_init_wakeup(pf);
+
+ err = ice_init_link(pf);
+ if (err)
+ goto err_init_link;
+
+ err = ice_send_version(pf);
+ if (err)
+ goto err_init_link;
+
+ ice_verify_cacheline_size(pf);
+
+ if (ice_is_safe_mode(pf))
+ ice_set_safe_mode_vlan_cfg(pf);
+ else
+ /* print PCI link speed and width */
+ pcie_print_link_status(pf->pdev);
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
- if (ice_is_rdma_ena(pf)) {
- pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
- if (pf->aux_idx < 0) {
- dev_err(dev, "Failed to allocate device ID for AUX driver\n");
- err = -ENOMEM;
- goto err_devlink_reg_param;
- }
+ clear_bit(ICE_SERVICE_DIS, pf->state);
- err = ice_init_rdma(pf);
- if (err) {
- dev_err(dev, "Failed to initialize RDMA: %d\n", err);
- err = -EIO;
- goto err_init_aux_unroll;
- }
- } else {
- dev_warn(dev, "RDMA is not supported on this device\n");
- }
+ /* since everything is good, start the service timer */
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
- ice_devlink_register(pf);
return 0;
-err_init_aux_unroll:
- pf->adev = NULL;
- ida_free(&ice_aux_ida, pf->aux_idx);
-err_devlink_reg_param:
- ice_devlink_unregister_params(pf);
-err_netdev_reg:
- ice_devlink_destroy_pf_port(pf);
-err_create_pf_port:
-err_send_version_unroll:
- ice_vsi_release_all(pf);
-err_alloc_sw_unroll:
+err_init_link:
+ ice_deinit_pf_sw(pf);
+err_init_pf_sw:
+ ice_dealloc_vsis(pf);
+err_alloc_vsis:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+static void ice_deinit(struct ice_pf *pf)
+{
set_bit(ICE_SERVICE_DIS, pf->state);
set_bit(ICE_DOWN, pf->state);
- devm_kfree(dev, pf->first_sw);
-err_msix_misc_unroll:
- ice_free_irq_msix_misc(pf);
-err_init_interrupt_unroll:
- ice_clear_interrupt_scheme(pf);
-err_init_vsi_stats_unroll:
- devm_kfree(dev, pf->vsi_stats);
- pf->vsi_stats = NULL;
-err_init_vsi_unroll:
- devm_kfree(dev, pf->vsi);
-err_init_pf_unroll:
- ice_deinit_pf(pf);
- ice_devlink_destroy_regions(pf);
- ice_deinit_hw(hw);
-err_exit_unroll:
- pci_disable_pcie_error_reporting(pdev);
+
+ ice_deinit_pf_sw(pf);
+ ice_dealloc_vsis(pf);
+ ice_deinit_dev(pf);
+}
+
+/**
+ * ice_load - load PF by initializing HW and starting the VSI
+ * @pf: pointer to the PF instance
+ */
+int ice_load(struct ice_pf *pf)
+{
+ struct ice_vsi_cfg_params params = {};
+ struct ice_vsi *vsi;
+ int err;
+
+ err = ice_reset(&pf->hw, ICE_RESET_PFR);
+ if (err)
+ return err;
+
+ err = ice_init_dev(pf);
+ if (err)
+ return err;
+
+ vsi = ice_get_main_vsi(pf);
+
+ params = ice_vsi_to_params(vsi);
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ err = ice_vsi_cfg(vsi, &params);
+ if (err)
+ goto err_vsi_cfg;
+
+ err = ice_start_eth(ice_get_main_vsi(pf));
+ if (err)
+ goto err_start_eth;
+
+ err = ice_init_rdma(pf);
+ if (err)
+ goto err_init_rdma;
+
+ ice_init_features(pf);
+ ice_service_task_restart(pf);
+
+ clear_bit(ICE_DOWN, pf->state);
+
+ return 0;
+
+err_init_rdma:
+ ice_vsi_close(ice_get_main_vsi(pf));
+err_start_eth:
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+err_vsi_cfg:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+/**
+ * ice_unload - unload PF by stopping the VSI and deinitializing HW
+ * @pf: pointer to the PF instance
+ */
+void ice_unload(struct ice_pf *pf)
+{
+ ice_deinit_features(pf);
+ ice_deinit_rdma(pf);
+ ice_vsi_close(ice_get_main_vsi(pf));
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+ ice_deinit_dev(pf);
+}
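ice_load()/ice_unload() give the driver a natural attach/detach pair for devlink reload. The wiring is not part of this patch, but a hedged sketch against the core devlink API (op name and signature from devlink.h) could look like:

	static int
	ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
				enum devlink_reload_action action,
				enum devlink_reload_limit limit,
				struct netlink_ext_ack *extack)
	{
		struct ice_pf *pf = devlink_priv(devlink);

		/* Tear the PF down; a matching .reload_up op would call
		 * ice_load(pf) to bring it back.
		 */
		ice_unload(pf);
		return 0;
	}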
+
+/**
+ * ice_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ice_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+
+ if (pdev->is_virtfn) {
+ dev_err(dev, "can't probe a virtual function\n");
+ return -EINVAL;
+ }
+
+ /* this driver uses devres, see
+ * Documentation/driver-api/driver-model/devres.rst
+ */
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
+ if (err) {
+ dev_err(dev, "BAR0 I/O map error %d\n", err);
+ return err;
+ }
+
+ pf = ice_allocate_pf(dev);
+ if (!pf)
+ return -ENOMEM;
+
+ /* initialize Auxiliary index to invalid value */
+ pf->aux_idx = -1;
+
+ /* set up for high or low DMA */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(dev, "DMA configuration failed: 0x%x\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ pf->pdev = pdev;
+ pci_set_drvdata(pdev, pf);
+ set_bit(ICE_DOWN, pf->state);
+ /* Disable service task until DOWN bit is cleared */
+ set_bit(ICE_SERVICE_DIS, pf->state);
+
+ hw = &pf->hw;
+ hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
+ pci_save_state(pdev);
+
+ hw->back = pf;
+ hw->port_info = NULL;
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+ ice_set_ctrlq_len(hw);
+
+ pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+
+#ifndef CONFIG_DYNAMIC_DEBUG
+ if (debug < -1)
+ hw->debug_mask = debug;
+#endif
+
+ err = ice_init(pf);
+ if (err)
+ goto err_init;
+
+ err = ice_init_eth(pf);
+ if (err)
+ goto err_init_eth;
+
+ err = ice_init_rdma(pf);
+ if (err)
+ goto err_init_rdma;
+
+ err = ice_init_devlink(pf);
+ if (err)
+ goto err_init_devlink;
+
+ ice_init_features(pf);
+
+ return 0;
+
+err_init_devlink:
+ ice_deinit_rdma(pf);
+err_init_rdma:
+ ice_deinit_eth(pf);
+err_init_eth:
+ ice_deinit(pf);
+err_init:
pci_disable_device(pdev);
return err;
}
@@ -5072,66 +5279,42 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
static void ice_remove(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- struct ice_hw *hw;
int i;
- hw = &pf->hw;
-
- ice_devlink_unregister(pf);
for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
if (!ice_is_reset_in_progress(pf->state))
break;
msleep(100);
}
- ice_tc_indir_block_remove(pf);
-
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
}
ice_service_task_stop(pf);
-
ice_aq_cancel_waiting_tasks(pf);
- ice_unplug_aux_dev(pf);
- if (pf->aux_idx >= 0)
- ida_free(&ice_aux_ida, pf->aux_idx);
- ice_devlink_unregister_params(pf);
set_bit(ICE_DOWN, pf->state);
- ice_deinit_lag(pf);
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_release(pf);
- if (ice_is_feature_supported(pf, ICE_F_GNSS))
- ice_gnss_exit(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
- ice_setup_mc_magic_wake(pf);
+ ice_deinit_features(pf);
+ ice_deinit_devlink(pf);
+ ice_deinit_rdma(pf);
+ ice_deinit_eth(pf);
+ ice_deinit(pf);
+
ice_vsi_release_all(pf);
- mutex_destroy(&hw->fdir_fltr_lock);
- ice_devlink_destroy_pf_port(pf);
+
+ ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
- ice_free_irq_msix_misc(pf);
- ice_for_each_vsi(pf, i) {
- if (!pf->vsi[i])
- continue;
- ice_vsi_free_q_vectors(pf->vsi[i]);
- }
- devm_kfree(&pdev->dev, pf->vsi_stats);
- pf->vsi_stats = NULL;
- ice_deinit_pf(pf);
- ice_devlink_destroy_regions(pf);
- ice_deinit_hw(hw);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
* and the service task is already stopped.
*/
- ice_reset(hw, ICE_RESET_PFR);
+ ice_reset(&pf->hw, ICE_RESET_PFR);
pci_wait_for_pending_transaction(pdev);
- ice_clear_interrupt_scheme(pf);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
@@ -5559,7 +5742,7 @@ static int __init ice_module_init(void)
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
- ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
@@ -6165,12 +6348,12 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
}
/**
- * ice_vsi_cfg - Setup the VSI
+ * ice_vsi_cfg_lan - Set up the VSI LAN-related config
* @vsi: the VSI being configured
*
* Return 0 on success and negative value on error
*/
-int ice_vsi_cfg(struct ice_vsi *vsi)
+int ice_vsi_cfg_lan(struct ice_vsi *vsi)
{
int err;
@@ -6386,7 +6569,7 @@ int ice_up(struct ice_vsi *vsi)
{
int err;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (!err)
err = ice_up_complete(vsi);
@@ -6954,7 +7137,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (err)
goto err_setup_rx;
@@ -7008,7 +7191,7 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (err)
goto err_setup_rx;
@@ -7093,7 +7276,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
continue;
/* rebuild the VSI */
- err = ice_vsi_rebuild(vsi, true);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
if (err) {
dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
@@ -8434,12 +8617,9 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
/* clear the VSI from scheduler tree */
ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
- /* Delete VSI from FW */
+ /* Delete VSI from FW, PF and HW VSI arrays */
ice_vsi_delete(ch->ch_vsi);
- /* Delete VSI from PF and HW VSI arrays */
- ice_vsi_clear(ch->ch_vsi);
-
/* free the channel */
kfree(ch);
}
@@ -8498,7 +8678,7 @@ static int ice_rebuild_channels(struct ice_pf *pf)
type = vsi->type;
/* rebuild ADQ VSI */
- err = ice_vsi_rebuild(vsi, true);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
if (err) {
dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
ice_vsi_type_str(type), vsi->idx, err);
@@ -8730,14 +8910,14 @@ config_tcf:
cur_rxq = vsi->num_rxq;
/* proceed with rebuild main VSI using correct number of queues */
- ret = ice_vsi_rebuild(vsi, false);
+ ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
if (ret) {
/* fallback to current number of queues */
dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
vsi->req_txq = cur_txq;
vsi->req_rxq = cur_rxq;
clear_bit(ICE_RESET_FAILED, pf->state);
- if (ice_vsi_rebuild(vsi, false)) {
+ if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
dev_err(dev, "Rebuild of main VSI failed again\n");
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 3ba1408c56a9..96a64c25e2ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -41,21 +41,6 @@ static void ice_free_vf_entries(struct ice_pf *pf)
}
/**
- * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
- * @vf: invalidate this VF's VSI after freeing it
- */
-static void ice_vf_vsi_release(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- if (WARN_ON(!vsi))
- return;
-
- ice_vsi_release(vsi);
- ice_vf_invalidate_vsi(vf);
-}
-
-/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
*/
@@ -248,11 +233,16 @@ void ice_free_vfs(struct ice_pf *pf)
*/
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);
+ params.type = ICE_VSI_VF;
+ params.pi = ice_vf_get_port_info(vf);
+ params.vf = vf;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ vsi = ice_vsi_setup(pf, &params);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
@@ -583,51 +573,19 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
*/
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
- struct ice_vsi_vlan_ops *vlan_ops;
struct ice_pf *pf = vf->pf;
- u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
- struct device *dev;
int err;
vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
- dev = ice_pf_to_dev(pf);
vsi = ice_vf_vsi_setup(vf);
if (!vsi)
return -ENOMEM;
- err = ice_vsi_add_vlan_zero(vsi);
- if (err) {
- dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
- vf->vf_id);
- goto release_vsi;
- }
-
- vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- err = vlan_ops->ena_rx_filtering(vsi);
- if (err) {
- dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
- vf->vf_id);
- goto release_vsi;
- }
-
- eth_broadcast_addr(broadcast);
- err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (err) {
- dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
- vf->vf_id, err);
- goto release_vsi;
- }
-
- err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
- if (err) {
- dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
- vf->vf_id);
+ err = ice_vf_init_host_cfg(vf, vsi);
+ if (err)
goto release_vsi;
- }
-
- vf->num_mac = 1;
return 0;
@@ -697,6 +655,21 @@ static void ice_sriov_free_vf(struct ice_vf *vf)
}
/**
+ * ice_sriov_clear_reset_state - clears VF Reset status register
+ * @vf: the vf to configure
+ */
+static void ice_sriov_clear_reset_state(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+
+ /* Clear the reset status register so that the VF immediately sees that
+ * the device is resetting, even if hardware hasn't yet gotten around
+ * to clearing VFGEN_RSTAT for us.
+ */
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
+}
+
+/**
* ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
* @vf: the vf to configure
*/
@@ -799,23 +772,19 @@ static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
}
/**
- * ice_sriov_vsi_rebuild - release and rebuild VF's VSI
- * @vf: VF to release and setup the VSI for
+ * ice_sriov_create_vsi - Create a new VSI for a VF
+ * @vf: VF to create the VSI for
*
- * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
- * configuration change, etc.).
+ * This is called by ice_vf_recreate_vsi to create the new VSI after the old
+ * VSI has been released.
*/
-static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
+static int ice_sriov_create_vsi(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
- ice_vf_vsi_release(vf);
- if (!ice_vf_vsi_setup(vf)) {
- dev_err(ice_pf_to_dev(pf),
- "Failed to release and setup the VF%u's VSI\n",
- vf->vf_id);
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
return -ENOMEM;
- }
return 0;
}
@@ -826,8 +795,6 @@ static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
*/
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
- ice_vf_rebuild_host_cfg(vf);
- ice_vf_set_initialized(vf);
ice_ena_vf_mappings(vf);
wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
@@ -835,11 +802,13 @@ static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
static const struct ice_vf_ops ice_sriov_vf_ops = {
.reset_type = ICE_VF_RESET,
.free = ice_sriov_free_vf,
+ .clear_reset_state = ice_sriov_clear_reset_state,
.clear_mbx_register = ice_sriov_clear_mbx_register,
.trigger_reset_register = ice_sriov_trigger_reset_register,
.poll_reset_status = ice_sriov_poll_reset_status,
.clear_reset_trigger = ice_sriov_clear_reset_trigger,
- .vsi_rebuild = ice_sriov_vsi_rebuild,
+ .irq_close = NULL,
+ .create_vsi = ice_sriov_create_vsi,
.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};
@@ -879,21 +848,9 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
/* set sriov vf ops for VFs created during SRIOV flow */
vf->vf_ops = &ice_sriov_vf_ops;
- vf->vf_sw_id = pf->first_sw;
- /* assign default capabilities */
- vf->spoofchk = true;
- vf->num_vf_qs = pf->vfs.num_qps_per;
- ice_vc_set_default_allowlist(vf);
-
- /* ctrl_vsi_idx will be set to a valid value only when VF
- * creates its first fdir rule.
- */
- ice_vf_ctrl_invalidate_vsi(vf);
- ice_vf_fdir_init(vf);
-
- ice_virtchnl_set_dflt_ops(vf);
+ ice_initialize_vf_entry(vf);
- mutex_init(&vf->cfg_lock);
+ vf->vf_sw_id = pf->first_sw;
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
@@ -1285,7 +1242,7 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
goto out_put_vf;
ivi->vf = vf_id;
- ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
+ ether_addr_copy(ivi->mac, vf->hw_lan_addr);
/* VF configuration for VLAN and applicable QoS */
ivi->vlan = ice_vf_get_port_vlan_id(vf);
@@ -1333,8 +1290,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return -EINVAL;
/* nothing left to do, unicast MAC already set */
- if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
- ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
+ if (ether_addr_equal(vf->dev_lan_addr, mac) &&
+ ether_addr_equal(vf->hw_lan_addr, mac)) {
ret = 0;
goto out_put_vf;
}
@@ -1348,8 +1305,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* VF is notified of its new MAC via the PF's response to the
* VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
- ether_addr_copy(vf->dev_lan_addr.addr, mac);
- ether_addr_copy(vf->hw_lan_addr.addr, mac);
+ ether_addr_copy(vf->dev_lan_addr, mac);
+ ether_addr_copy(vf->hw_lan_addr, mac);
if (is_zero_ether_addr(mac)) {
/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
vf->pf_set_mac = false;
@@ -1750,7 +1707,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
- vf->dev_lan_addr.addr,
+ vf->dev_lan_addr,
test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
? "on" : "off");
}
@@ -1794,7 +1751,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
- vf->dev_lan_addr.addr);
+ vf->dev_lan_addr);
}
}
mutex_unlock(&pf->vfs.table_lock);
@@ -1884,7 +1841,7 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
if (pf_vsi)
dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
- &vf->dev_lan_addr.addr[0],
+ &vf->dev_lan_addr[0],
pf_vsi->netdev->dev_addr);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 9b762f7972ce..61f844d22512 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
- goto err_free_lkup_exts;
+ goto err_unroll;
/* Group match words into recipes using preferred recipe grouping
* criteria.
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 80706f7330f4..6b48cbc049c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -1688,7 +1688,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
struct ice_vsi *ch_vsi = NULL;
u16 queue = act->rx_queue;
- if (queue > vsi->num_rxq) {
+ if (queue >= vsi->num_rxq) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because specified queue is invalid");
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 375eb6493f0f..0e57bd1b85fd 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -237,16 +237,49 @@ static void ice_vf_clear_counters(struct ice_vf *vf)
*/
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
+ /* Close any IRQ mapping now */
+ if (vf->vf_ops->irq_close)
+ vf->vf_ops->irq_close(vf);
+
ice_vf_clear_counters(vf);
vf->vf_ops->clear_reset_trigger(vf);
}
/**
+ * ice_vf_recreate_vsi - Release and re-create the VF's VSI
+ * @vf: VF to recreate the VSI for
+ *
+ * This is only called when a single VF is being reset (i.e. VFR, VFLR, host
+ * VF configuration change, etc.)
+ *
+ * It releases the old VSI and then creates a new one.
+ */
+static int ice_vf_recreate_vsi(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int err;
+
+ ice_vf_vsi_release(vf);
+
+ err = vf->vf_ops->create_vsi(vf);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to recreate the VF%u's VSI, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_vf_rebuild_vsi - rebuild the VF's VSI
* @vf: VF to rebuild the VSI for
*
* This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
* host, PFR, CORER, etc.).
+ *
+ * It reprograms the VSI configuration back into hardware.
*/
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
@@ -256,7 +289,7 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
if (WARN_ON(!vsi))
return -EINVAL;
- if (ice_vsi_rebuild(vsi, true)) {
+ if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
vf->vf_id);
return -EIO;
@@ -271,6 +304,21 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
}
/**
+ * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
+ * @vf: the VF being reset
+ *
+ * Perform reset tasks which must occur after the VSI has been re-created or
+ * rebuilt during a VF reset.
+ */
+static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_vf_rebuild_host_cfg(vf);
+ ice_vf_set_initialized(vf);
+
+ vf->vf_ops->post_vsi_rebuild(vf);
+}
+
+/**
* ice_is_any_vf_in_unicast_promisc - check if any VF(s)
* are in unicast promiscuous mode
* @pf: PF structure for accessing VF(s)
@@ -495,7 +543,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi(vf);
- vf->vf_ops->post_vsi_rebuild(vf);
+ ice_vf_post_vsi_rebuild(vf);
mutex_unlock(&vf->cfg_lock);
}
@@ -639,14 +687,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_vf_pre_vsi_rebuild(vf);
- if (vf->vf_ops->vsi_rebuild(vf)) {
+ if (ice_vf_recreate_vsi(vf)) {
dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
vf->vf_id);
err = -EFAULT;
goto out_unlock;
}
- vf->vf_ops->post_vsi_rebuild(vf);
+ ice_vf_post_vsi_rebuild(vf);
vsi = ice_get_vf_vsi(vf);
if (WARN_ON(!vsi)) {
err = -EINVAL;
@@ -673,7 +721,7 @@ out_unlock:
* ice_set_vf_state_qs_dis - Set VF queues state to disabled
* @vf: pointer to the VF structure
*/
-void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
/* Clear Rx/Tx enabled queues flag */
bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
@@ -681,9 +729,45 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
+/**
+ * ice_set_vf_state_dis - Set VF state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_dis(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ vf->vf_ops->clear_reset_state(vf);
+}
+
/* Private functions only accessed from other virtualization files */
/**
+ * ice_initialize_vf_entry - Initialize a VF entry
+ * @vf: pointer to the VF structure
+ */
+void ice_initialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vfs *vfs;
+
+ vfs = &pf->vfs;
+
+ /* assign default capabilities */
+ vf->spoofchk = true;
+ vf->num_vf_qs = vfs->num_qps_per;
+ ice_vc_set_default_allowlist(vf);
+ ice_virtchnl_set_dflt_ops(vf);
+
+ /* ctrl_vsi_idx will be set to a valid value only when iAVF
+ * creates its first fdir rule.
+ */
+ ice_vf_ctrl_invalidate_vsi(vf);
+ ice_vf_fdir_init(vf);
+
+ mutex_init(&vf->cfg_lock);
+}
+
+/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
*/
@@ -924,18 +1008,18 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
vf->num_mac++;
- if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
- status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
+ if (is_valid_ether_addr(vf->hw_lan_addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
if (status) {
dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
- &vf->hw_lan_addr.addr[0], vf->vf_id,
+ &vf->hw_lan_addr[0], vf->vf_id,
status);
return status;
}
vf->num_mac++;
- ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
}
return 0;
@@ -1115,11 +1199,16 @@ void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
*/
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
+ params.type = ICE_VSI_CTRL;
+ params.pi = ice_vf_get_port_info(vf);
+ params.vf = vf;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ vsi = ice_vsi_setup(pf, &params);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
ice_vf_ctrl_invalidate_vsi(vf);
@@ -1129,6 +1218,60 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
}
/**
+ * ice_vf_init_host_cfg - Initialize host admin configuration
+ * @vf: VF to initialize
+ * @vsi: the VSI created at initialization
+ *
+ * Initialize the VF host configuration: set up VLAN 0, add the VF VSI
+ * broadcast filter, and apply the spoof-check setting. Should only be
+ * called during VF creation.
+ */
+int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vf->pf;
+ u8 broadcast[ETH_ALEN];
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ eth_broadcast_addr(broadcast);
+ err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ vf->num_mac = 1;
+
+ err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
+ if (err) {
+ dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
* @vf: VF to remove access to VSI for
*/
@@ -1139,6 +1282,24 @@ void ice_vf_invalidate_vsi(struct ice_vf *vf)
}
/**
+ * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
+ * @vf: pointer to the VF structure
+ *
+ * Release the VSI associated with this VF and then invalidate the VSI
+ * indexes.
+ */
+void ice_vf_vsi_release(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vsi_release(vsi);
+ ice_vf_invalidate_vsi(vf);
+}
+
+/**
* ice_vf_set_initialized - VF is ready for VIRTCHNL communication
* @vf: VF to set in initialized state
*
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 52bd9a3816bf..ef30f05b5d02 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -56,11 +56,13 @@ struct ice_mdd_vf_events {
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
void (*free)(struct ice_vf *vf);
+ void (*clear_reset_state)(struct ice_vf *vf);
void (*clear_mbx_register)(struct ice_vf *vf);
void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
bool (*poll_reset_status)(struct ice_vf *vf);
void (*clear_reset_trigger)(struct ice_vf *vf);
- int (*vsi_rebuild)(struct ice_vf *vf);
+ void (*irq_close)(struct ice_vf *vf);
+ int (*create_vsi)(struct ice_vf *vf);
void (*post_vsi_rebuild)(struct ice_vf *vf);
};
@@ -96,8 +98,8 @@ struct ice_vf {
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
- struct virtchnl_ether_addr dev_lan_addr;
- struct virtchnl_ether_addr hw_lan_addr;
+ u8 dev_lan_addr[ETH_ALEN];
+ u8 hw_lan_addr[ETH_ALEN];
struct ice_time_mac legacy_last_added_umac;
DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
@@ -213,7 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
-void ice_set_vf_state_qs_dis(struct ice_vf *vf);
+void ice_set_vf_state_dis(struct ice_vf *vf);
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
void
ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
@@ -259,7 +261,7 @@ static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
return -EOPNOTSUPP;
}
-static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+static inline void ice_set_vf_state_dis(struct ice_vf *vf)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 9c8ef2b01f0f..6f3293b793b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -23,6 +23,7 @@
#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
#endif
+void ice_initialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
@@ -35,7 +36,9 @@ void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
+int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi);
void ice_vf_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_vsi_release(struct ice_vf *vf);
void ice_vf_set_initialized(struct ice_vf *vf);
#endif /* _ICE_VF_LIB_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index d4a4001b6e5d..f56fa94ff3d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -39,7 +39,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
-static const u32 ice_legacy_aq_to_vc_speed[15] = {
+static const u32 ice_legacy_aq_to_vc_speed[] = {
VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
VIRTCHNL_LINK_SPEED_100MB,
VIRTCHNL_LINK_SPEED_1GB,
@@ -51,10 +51,6 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */
};
/**
@@ -71,21 +67,20 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
*/
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
- u32 speed;
+ /* convert a BIT() value into an array index */
+ u32 index = fls(link_speed) - 1;
- if (adv_link_support) {
- /* convert a BIT() value into an array index */
- speed = ice_get_link_speed(fls(link_speed) - 1);
- } else {
+ if (adv_link_support)
+ return ice_get_link_speed(index);
+ else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
/* Virtchnl speeds are not defined for every speed supported in
* the hardware. To maintain compatibility with older AVF
* drivers, while reporting the speed the new speed values are
* resolved to the closest known virtchnl speeds
*/
- speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1];
- }
+ return ice_legacy_aq_to_vc_speed[index];
- return speed;
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
}
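A quick worked example of the index math, using only values visible in this hunk (BIT(0) is the table's first entry; BIT(14) was the last of the removed UNKNOWN padding):

	/* fls(BIT(0)) == 1, so index == 0 and the lookup yields
	 * VIRTCHNL_LINK_SPEED_100MB. fls(BIT(14)) == 15 gives index 14,
	 * now past the trimmed table, so the helper returns
	 * VIRTCHNL_LINK_SPEED_UNKNOWN instead of reading padded entries.
	 */
	u32 s0  = ice_conv_link_speed_to_virtchnl(false, BIT(0));
	u32 s14 = ice_conv_link_speed_to_virtchnl(false, BIT(14));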
/* The mailbox overflow detection algorithm helps to check if there
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index 5ecc0ee9a78e..b1ffb81893d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
/* outer VLAN ops regardless of port VLAN config */
vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
if (ice_vf_is_port_vlan_ena(vf)) {
/* setup outer VLAN ops */
vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ /* all Rx traffic should be in the domain of the
+ * assigned port VLAN, so prevent disabling Rx VLAN
+ * filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
@@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
} else {
+ vlan_ops->dis_rx_filtering =
+ ice_vsi_dis_rx_vlan_filtering;
+
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
@@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
+ /* all Rx traffic should be in the domain of the
+ * assigned port VLAN, so prevent disabling Rx VLAN
+ * filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
} else {
+ vlan_ops->dis_rx_filtering =
+ ice_vsi_dis_rx_vlan_filtering;
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index dab3cd5d300e..e24e3f5017ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -507,7 +507,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
- vf->hw_lan_addr.addr);
+ vf->hw_lan_addr);
/* match guest capabilities */
vf->driver_caps = vfres->vf_cap_flags;
@@ -1802,10 +1802,10 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
* was correctly specified over VIRTCHNL
*/
if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
- is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
+ is_zero_ether_addr(vf->hw_lan_addr)) ||
ice_is_vc_addr_primary(vc_ether_addr)) {
- ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
- ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
+ ether_addr_copy(vf->dev_lan_addr, mac_addr);
+ ether_addr_copy(vf->hw_lan_addr, mac_addr);
}
/* hardware and device MACs are already set, but it's possible that the
@@ -1836,7 +1836,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
int ret;
/* device MAC already added */
- if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
+ if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
return 0;
if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
@@ -1891,8 +1891,8 @@ ice_update_legacy_cached_mac(struct ice_vf *vf,
ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
return;
- ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
- ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr);
}
/**
@@ -1906,15 +1906,15 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
u8 *mac_addr = vc_ether_addr->addr;
if (!is_valid_ether_addr(mac_addr) ||
- !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+ !ether_addr_equal(vf->dev_lan_addr, mac_addr))
return;
/* allow the device MAC to be repopulated in the add flow and don't
- * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
+ * clear the hardware MAC (i.e. hw_lan_addr) here as that is meant
* to be persistent on VM reboot and across driver unload/load, which
* won't work if we clear the hardware MAC here
*/
- eth_zero_addr(vf->dev_lan_addr.addr);
+ eth_zero_addr(vf->dev_lan_addr);
ice_update_legacy_cached_mac(vf, vc_ether_addr);
}
@@ -1934,7 +1934,7 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
int status;
if (!ice_can_vf_change_mac(vf) &&
- ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+ ether_addr_equal(vf->dev_lan_addr, mac_addr))
return 0;
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
@@ -3733,7 +3733,7 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
int result;
if (!is_unicast_ether_addr(mac_addr) ||
- ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
+ ether_addr_equal(mac_addr, vf->hw_lan_addr))
continue;
if (vf->pf_set_mac) {
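These hunks flatten hw_lan_addr/dev_lan_addr from a wrapper struct member to plain byte arrays. A hedged sketch of the two-MAC caching behaviour the surrounding code relies on (field names mirror the driver, helpers are simplified): the hardware MAC persists across resets while the device MAC is zeroed on delete and repopulated on the next add.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct vf_state {
	unsigned char dev_lan_addr[ETH_ALEN];
	unsigned char hw_lan_addr[ETH_ALEN];
};

static void mac_add(struct vf_state *vf, const unsigned char *mac)
{
	memcpy(vf->dev_lan_addr, mac, ETH_ALEN);
	memcpy(vf->hw_lan_addr, mac, ETH_ALEN);
}

static void mac_del(struct vf_state *vf)
{
	/* only the device MAC is zeroed; the hw MAC survives for re-add */
	memset(vf->dev_lan_addr, 0, ETH_ALEN);
}

int main(void)
{
	struct vf_state vf = { { 0 } };
	const unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };

	mac_add(&vf, mac);
	mac_del(&vf);
	printf("hw MAC still set: %02x:...:%02x\n",
	       vf.hw_lan_addr[0], vf.hw_lan_addr[ETH_ALEN - 1]);
	return 0;
}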
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index c6a58343d81d..e6ef6b303222 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -113,7 +113,7 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
return -EINVAL;
- if (!pf->vsi[vf->lan_vsi_idx])
+ if (!ice_get_vf_vsi(vf))
return -EINVAL;
return 0;
@@ -494,7 +494,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
vf_prof = fdir->fdir_prof[flow];
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
return;
@@ -572,7 +572,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi)
return -EINVAL;
@@ -1205,7 +1205,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
return -EINVAL;
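The fdir hunks replace open-coded pf->vsi[vf->lan_vsi_idx] lookups with the ice_get_vf_vsi() accessor. A hedged sketch of why such an accessor helps: the index-validity check lives in one place and every caller gets the same NULL-on-failure contract.

#include <stdio.h>
#include <stddef.h>

struct vsi { int id; };

struct pf {
	struct vsi *vsi[4];
};

struct vf {
	struct pf *pf;
	unsigned int lan_vsi_idx;
};

/* single place that validates the index before dereferencing the array */
static struct vsi *get_vf_vsi(struct vf *vf)
{
	if (vf->lan_vsi_idx >= 4)
		return NULL;
	return vf->pf->vsi[vf->lan_vsi_idx];
}

int main(void)
{
	struct vsi v = { .id = 7 };
	struct pf pf = { .vsi = { &v } };
	struct vf vf = { .pf = &pf, .lan_vsi_idx = 0 };
	struct vsi *vsi = get_vf_vsi(&vf);

	printf("vsi %d\n", vsi ? vsi->id : -1);
	return 0;
}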
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 0e11a082f7a1..087c950fea0b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2810,6 +2810,22 @@ static int igb_offload_txtime(struct igb_adapter *adapter,
return 0;
}
+static int igb_tc_query_caps(struct igb_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->broken_mqprio = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static LIST_HEAD(igb_block_cb_list);
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -2818,6 +2834,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
struct igb_adapter *adapter = netdev_priv(dev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return igb_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_CBS:
return igb_offload_cbs(adapter, type_data);
case TC_SETUP_BLOCK:
@@ -3200,8 +3218,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_pci_reg;
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -3633,7 +3649,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -3844,8 +3859,6 @@ static void igb_remove(struct pci_dev *pdev)
kfree(adapter->shadow_vfta);
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
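Both the igb hunk above and the igc hunk below add a TC_QUERY_CAPS case that fills a taprio capability struct before any offload is installed. A hedged sketch of the dispatch shape, with made-up type names standing in for the kernel's tc structures:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

enum setup_type { QUERY_CAPS, QDISC_TAPRIO };

struct taprio_caps {
	bool broken_mqprio;
	bool gate_mask_per_txq;
};

struct query_caps_base {
	enum setup_type type;
	void *caps;
};

static int tc_query_caps(struct query_caps_base *base)
{
	switch (base->type) {
	case QDISC_TAPRIO: {
		struct taprio_caps *caps = base->caps;

		caps->broken_mqprio = true;	/* taprio must not rely on mqprio */
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

int main(void)
{
	struct taprio_caps caps = { 0 };
	struct query_caps_base base = { .type = QDISC_TAPRIO, .caps = &caps };

	printf("ret=%d broken_mqprio=%d\n", tc_query_caps(&base),
	       caps.broken_mqprio);
	return 0;
}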
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8b572cd2c350..2928a6c73692 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2942,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
- !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
+ !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
+ (rd32(IGC_TDH(tx_ring->reg_idx)) !=
+ readl(tx_ring->tail))) {
/* detected Tx unit hang */
netdev_err(tx_ring->netdev,
"Detected Tx Unit Hang\n"
@@ -5069,6 +5071,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * igc_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: queue number that timed out
+ **/
+static void igc_tx_timeout(struct net_device *netdev,
+ unsigned int __always_unused txqueue)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ wr32(IGC_EICS,
+ (adapter->eims_enable_mask & ~adapter->eims_other));
+}
+
+/**
* igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
* @stats: rtnl_link_stats64 pointer
@@ -5495,7 +5515,7 @@ static void igc_watchdog_task(struct work_struct *work)
case SPEED_100:
case SPEED_1000:
case SPEED_2500:
- adapter->tx_timeout_factor = 7;
+ adapter->tx_timeout_factor = 1;
break;
}
@@ -6205,12 +6225,35 @@ static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
return igc_tsn_offload_apply(adapter);
}
+static int igc_tc_query_caps(struct igc_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->broken_mqprio = true;
+
+ if (hw->mac.type == igc_i225)
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct igc_adapter *adapter = netdev_priv(dev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return igc_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_TAPRIO:
return igc_tsn_enable_qbv_scheduling(adapter, type_data);
@@ -6324,6 +6367,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
+ .ndo_tx_timeout = igc_tx_timeout,
.ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
@@ -6434,8 +6478,6 @@ static int igc_probe(struct pci_dev *pdev,
if (err)
goto err_pci_reg;
- pci_enable_pcie_error_reporting(pdev);
-
err = pci_enable_ptm(pdev, NULL);
if (err < 0)
dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
@@ -6643,7 +6685,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -6691,8 +6732,6 @@ static void igc_remove(struct pci_dev *pdev)
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
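The igc hunks above tighten the Tx-hang check: besides the timeout and the pause-frame (TXOFF) test, a hang is only declared if the hardware head pointer still differs from the software tail, i.e. posted descriptors remain unconsumed. A hedged sketch of the combined condition with mocked register reads:

#include <stdio.h>
#include <stdbool.h>

/* mocked state standing in for jiffies, IGC_STATUS and TDH/tail reads */
struct txq_state {
	unsigned long now, stamp, timeout;
	bool paused_by_flow_control;	/* IGC_STATUS_TXOFF analogue */
	unsigned int hw_head, sw_tail;	/* TDH vs. ring tail analogue */
};

static bool tx_hang_detected(const struct txq_state *q)
{
	return q->now > q->stamp + q->timeout &&
	       !q->paused_by_flow_control &&
	       q->hw_head != q->sw_tail;	/* work still pending in the ring */
}

int main(void)
{
	struct txq_state q = {
		.now = 1000, .stamp = 0, .timeout = 100,
		.paused_by_flow_control = false,
		.hw_head = 3, .sw_tail = 5,
	};

	printf("hang: %d\n", tx_hang_detected(&q));
	return 0;
}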
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index c34734d432e0..4e10ced736db 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -417,10 +417,12 @@ static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
*
* We need to convert the system time value stored in the RX/TXSTMP registers
* into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * Returns 0 on success.
**/
-static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
- struct skb_shared_hwtstamps *hwtstamps,
- u64 systim)
+static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
{
switch (adapter->hw.mac.type) {
case igc_i225:
@@ -430,8 +432,9 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
systim & 0xFFFFFFFF);
break;
default:
- break;
+ return -EINVAL;
}
+ return 0;
}
/**
@@ -652,7 +655,8 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
- igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval))
+ return;
switch (adapter->link_speed) {
case SPEED_10:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index af4c12b6059f..a3aaf051f9f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10812,8 +10812,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -11244,7 +11242,6 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -11333,8 +11330,6 @@ static void ixgbe_remove(struct pci_dev *pdev)
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
if (disable_dev)
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index dc2989103a77..1cb4f59c0050 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -38,7 +38,7 @@
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
-#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
/* Registers */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index bda1a6fa2ec4..e4407f09c9d3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1500,6 +1500,9 @@ static const struct devlink_param rvu_af_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
+};
+
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
"npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1556,7 +1559,6 @@ int rvu_register_dl(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
struct devlink *dl;
- size_t size;
int err;
dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
@@ -1578,21 +1580,32 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
+ err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl_health;
+ }
+
/* Register exact match devlink only for CN10K-B */
- size = ARRAY_SIZE(rvu_af_dl_params);
if (!rvu_npc_exact_has_match_table(rvu))
- size -= 1;
+ goto done;
- err = devlink_params_register(dl, rvu_af_dl_params, size);
+ err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
if (err) {
dev_err(rvu->dev,
- "devlink params register failed with error %d", err);
- goto err_dl_health;
+ "devlink exact match params register failed with error %d", err);
+ goto err_dl_exact_match;
}
+done:
devlink_register(dl);
return 0;
+err_dl_exact_match:
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
err_dl_health:
rvu_health_reporters_destroy(rvu);
devlink_free(dl);
@@ -1605,8 +1618,14 @@ void rvu_unregister_dl(struct rvu *rvu)
struct devlink *dl = rvu_dl->dl;
devlink_unregister(dl);
- devlink_params_unregister(dl, rvu_af_dl_params,
- ARRAY_SIZE(rvu_af_dl_params));
+
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
+ /* Unregister exact match devlink only for CN10K-B */
+ if (rvu_npc_exact_has_match_table(rvu))
+ devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
+
rvu_health_reporters_destroy(rvu);
devlink_free(dl);
}
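The rvu hunk splits one devlink params array into a base array plus an optional exact-match array that is only registered on hardware with the match table, and the unwind path mirrors that split. A hedged sketch of the register/unregister symmetry, with stubbed registration calls:

#include <stdio.h>
#include <stdbool.h>

static int register_params(const char *name) { printf("reg %s\n", name); return 0; }
static void unregister_params(const char *name) { printf("unreg %s\n", name); }

static bool has_exact_match_table(void) { return true; }	/* HW probe stub */

static int register_dl(void)
{
	int err = register_params("base");

	if (err)
		return err;
	if (!has_exact_match_table())
		return 0;			/* optional set skipped */
	err = register_params("exact_match");
	if (err)
		unregister_params("base");	/* unwind the base set */
	return err;
}

static void unregister_dl(void)
{
	unregister_params("base");
	if (has_exact_match_table())
		unregister_params("exact_match");
}

int main(void)
{
	if (!register_dl())
		unregister_dl();
	return 0;
}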
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index f69102d20c90..20ebb9c95c73 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -200,10 +200,8 @@ void npc_config_secret_key(struct rvu *rvu, int blkaddr)
struct rvu_hwinfo *hw = rvu->hw;
u8 intf;
- if (!hwcap->npc_hash_extract) {
- dev_info(rvu->dev, "HW does not support secret key configuration\n");
+ if (!hwcap->npc_hash_extract)
return;
- }
for (intf = 0; intf < hw->npc_intfs; intf++) {
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
@@ -221,10 +219,8 @@ void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
struct rvu_hwinfo *hw = rvu->hw;
u8 intf;
- if (!hwcap->npc_hash_extract) {
- dev_dbg(rvu->dev, "Field hash extract feature is not supported\n");
+ if (!hwcap->npc_hash_extract)
return;
- }
for (intf = 0; intf < hw->npc_intfs; intf++) {
npc_program_mkex_hash_rx(rvu, blkaddr, intf);
@@ -1853,19 +1849,13 @@ int rvu_npc_exact_init(struct rvu *rvu)
/* Check exact match feature is supported */
npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
- if (!(npc_const3 & BIT_ULL(62))) {
- dev_info(rvu->dev, "%s: No support for exact match support\n",
- __func__);
+ if (!(npc_const3 & BIT_ULL(62)))
return 0;
- }
/* Check if kex profile has enabled EXACT match nibble */
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
- if (!(cfg & NPC_EXACT_NIBBLE_HIT)) {
- dev_info(rvu->dev, "%s: NPC exact match nibble not enabled in KEX profile\n",
- __func__);
+ if (!(cfg & NPC_EXACT_NIBBLE_HIT))
return 0;
- }
/* Set capability to true */
rvu->hw->cap.npc_exact_match_enabled = true;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ac54b6f2bb5c..14be6ea51b88 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1621,8 +1621,8 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
if (IS_ERR(pp))
return pp;
- err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
- id, PAGE_SIZE);
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+ eth->rx_napi.napi_id, PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -1921,7 +1921,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
while (done < budget) {
unsigned int pktlen, *rxdcsum;
+ bool has_hwaccel_tag = false;
struct net_device *netdev;
+ u16 vlan_proto, vlan_tci;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -2061,27 +2063,29 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- if (trxd.rxd3 & RX_DMA_VTAG_V2)
- __vlan_hwaccel_put_tag(skb,
- htons(RX_DMA_VPID(trxd.rxd4)),
- RX_DMA_VID(trxd.rxd4));
+ if (trxd.rxd3 & RX_DMA_VTAG_V2) {
+ vlan_proto = RX_DMA_VPID(trxd.rxd4);
+ vlan_tci = RX_DMA_VID(trxd.rxd4);
+ has_hwaccel_tag = true;
+ }
} else if (trxd.rxd2 & RX_DMA_VTAG) {
- __vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
- RX_DMA_VID(trxd.rxd3));
+ vlan_proto = RX_DMA_VPID(trxd.rxd3);
+ vlan_tci = RX_DMA_VID(trxd.rxd3);
+ has_hwaccel_tag = true;
}
}
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
- if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
- unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
+ if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
+ unsigned int port = vlan_proto & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
eth->dsa_meta[port])
skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
-
- __vlan_hwaccel_clear_tag(skb);
+ } else if (has_hwaccel_tag) {
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
}
skb_record_rx_queue(skb, 0);
@@ -3177,7 +3181,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
- if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
+ if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
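The mtk_poll_rx hunk stops writing the hardware VLAN tag into the skb immediately; it records the proto/TCI pair plus a flag, and only tags the skb if the tag is not actually the MTK DSA special tag. A hedged sketch of the defer-then-decide flow, with the skb replaced by plain printfs:

#include <stdio.h>
#include <stdbool.h>

struct rx_meta {
	bool has_hwaccel_tag;
	unsigned short vlan_proto, vlan_tci;
	bool uses_dsa;
};

static void deliver(const struct rx_meta *m)
{
	if (m->has_hwaccel_tag && m->uses_dsa) {
		/* the "VLAN" is really the MTK special tag: route by port,
		 * never expose it to the stack as a VLAN tag */
		unsigned int port = m->vlan_proto & 0x7;

		printf("DSA port %u\n", port);
	} else if (m->has_hwaccel_tag) {
		printf("VLAN tag %u/%u\n", m->vlan_proto, m->vlan_tci);
	} else {
		printf("untagged\n");
	}
}

int main(void)
{
	struct rx_meta m = {
		.has_hwaccel_tag = true,
		.vlan_proto = 3, .vlan_tci = 100,
		.uses_dsa = true,
	};

	deliver(&m);
	return 0;
}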
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index dff0e3ad2de6..afc9d52e79bf 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -533,7 +533,7 @@
#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0)
#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1)
#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2)
-#define SGMII_DUPLEX_FULL BIT(4)
+#define SGMII_DUPLEX_HALF BIT(4)
#define SGMII_IF_MODE_BIT5 BIT(5)
#define SGMII_REMOTE_FAULT_DIS BIT(8)
#define SGMII_CODE_SYNC_SET_VAL BIT(9)
@@ -1066,11 +1066,13 @@ struct mtk_soc_data {
* @regmap: The register map pointing at the range used to setup
* SGMII modes
* @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
+ * @interface: Currently configured interface mode
* @pcs: Phylink PCS structure
*/
struct mtk_pcs {
struct regmap *regmap;
u32 ana_rgc3;
+ phy_interface_t interface;
struct phylink_pcs pcs;
};
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 451a87b1bc20..6883eb34cd8b 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -615,8 +615,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
- flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
- GFP_ATOMIC);
+ flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
if (!flow_info)
return;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 16b02e1d4649..5e8bc48252b1 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -279,7 +279,6 @@ struct mtk_flow_entry {
struct {
struct mtk_flow_entry *base_flow;
struct hlist_node list;
- struct {} end;
} l2_data;
};
struct rhash_head node;
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 5c286f2c9418..bb00de1003ac 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -43,11 +43,6 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
int advertise, link_timer;
bool changed, use_an;
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- rgc3 = RG_PHY_SPEED_3_125G;
- else
- rgc3 = 0;
-
advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
advertising);
if (advertise < 0)
@@ -88,9 +83,22 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
bmcr = 0;
}
- /* Configure the underlying interface speed */
- regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
- RG_PHY_SPEED_3_125G, rgc3);
+ if (mpcs->interface != interface) {
+ /* PHYA power down */
+ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+ SGMII_PHYA_PWD, SGMII_PHYA_PWD);
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ rgc3 = RG_PHY_SPEED_3_125G;
+ else
+ rgc3 = 0;
+
+ /* Configure the underlying interface speed */
+ regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
+ RG_PHY_SPEED_3_125G, rgc3);
+
+ mpcs->interface = interface;
+ }
/* Update the advertisement, noting whether it has changed */
regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
@@ -108,9 +116,17 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);
- /* Release PHYA power down state */
- regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
- SGMII_PHYA_PWD, 0);
+ /* Release PHYA power down state.
+ * Clearing only the SGMII_PHYA_PWD bit isn't enough: in some cases the
+ * SGMII_PHYA_PWD register still contains 0x9, which prevents SGMII from
+ * working. The SGMII then shows link but no traffic can flow. Writing
+ * 0x0 to the PHYA_PWD register fixes the issue; 0x0 was taken from a
+ * known-good state of the SGMII interface. It is unknown how much
+ * settling time the QPHY needs, but the sequence is racy without a
+ * sleep. Tested on mt7622 & mt7986.
+ */
+ usleep_range(50, 100);
+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);
return changed;
}
@@ -138,11 +154,11 @@ static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
else
sgm_mode = SGMII_SPEED_1000;
- if (duplex == DUPLEX_FULL)
- sgm_mode |= SGMII_DUPLEX_FULL;
+ if (duplex != DUPLEX_FULL)
+ sgm_mode |= SGMII_DUPLEX_HALF;
regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
- SGMII_DUPLEX_FULL | SGMII_SPEED_MASK,
+ SGMII_DUPLEX_HALF | SGMII_SPEED_MASK,
sgm_mode);
}
}
@@ -171,6 +187,8 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
return PTR_ERR(ss->pcs[i].regmap);
ss->pcs[i].pcs.ops = &mtk_pcs_ops;
+ ss->pcs[i].pcs.poll = true;
+ ss->pcs[i].interface = PHY_INTERFACE_MODE_NA;
}
return 0;
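The mtk_sgmii hunks cache the configured PHY interface and only power-cycle the PHYA and reprogram the speed register when the interface actually changes. A hedged sketch of the cache-and-compare idiom:

#include <stdio.h>

enum phy_if { IF_NA, IF_SGMII, IF_2500BASEX };

struct pcs {
	enum phy_if interface;	/* initialised to IF_NA at probe */
};

static void pcs_config(struct pcs *p, enum phy_if new_if)
{
	if (p->interface != new_if) {
		printf("PHYA power down, reprogram speed, power up\n");
		p->interface = new_if;
	} else {
		printf("interface unchanged, skip power cycle\n");
	}
}

int main(void)
{
	struct pcs p = { .interface = IF_NA };

	pcs_config(&p, IF_2500BASEX);	/* first config: full sequence */
	pcs_config(&p, IF_2500BASEX);	/* reconfig: no disruptive cycle */
	return 0;
}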
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 6152f77dcfd8..277738c50c56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4031,7 +4031,6 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_params_unregister;
pci_save_state(pdev);
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devl_unlock(devlink);
devlink_register(devlink);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 382d02f6619c..b00e33ed05e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -104,6 +104,7 @@ static bool mlx5_cmd_is_throttle_opcode(u16 op)
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_SYNC_CRYPTO:
return true;
}
return false;
@@ -523,6 +524,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
case MLX5_CMD_OP_SAVE_VHCA_STATE:
case MLX5_CMD_OP_LOAD_VHCA_STATE:
+ case MLX5_CMD_OP_SYNC_CRYPTO:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -ENOLINK;
@@ -725,6 +727,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(SYNC_CRYPTO);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 3e232a65a0c3..bb95b40d25eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -245,8 +245,9 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
pages = dev->priv.dbg.pages_debugfs;
debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
- debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
- debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
+ debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
+ debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
+ debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 95a69544a685..b742e04deec1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -156,6 +156,11 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (mlx5_core_is_mp_slave(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported for multi port slave");
+ return -EOPNOTSUPP;
+ }
+
if (pci_num_vf(pdev)) {
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
}
@@ -263,9 +268,10 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_devlink_trap_event_ctx trap_event_ctx;
enum devlink_trap_action action_orig;
struct mlx5_devlink_trap *dl_trap;
- int err = 0;
+ int err;
if (is_mdev_switchdev_mode(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
@@ -275,26 +281,25 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
dl_trap = mlx5_find_trap_by_id(dev, trap->id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
- err = -EOPNOTSUPP;
- goto out;
- }
+ if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP)
+ return -EOPNOTSUPP;
if (action == dl_trap->trap.action)
- goto out;
+ return 0;
action_orig = dl_trap->trap.action;
dl_trap->trap.action = action;
+ trap_event_ctx.trap = &dl_trap->trap;
+ trap_event_ctx.err = 0;
err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
- &dl_trap->trap);
- if (err)
+ &trap_event_ctx);
+ if (err == NOTIFY_BAD)
dl_trap->trap.action = action_orig;
-out:
- return err;
+
+ return trap_event_ctx.err;
}
static const struct devlink_ops mlx5_devlink_ops = {
@@ -744,7 +749,6 @@ void mlx5_devlink_traps_unregister(struct devlink *devlink)
int mlx5_devlink_params_register(struct devlink *devlink)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
err = devl_params_register(devlink, mlx5_devlink_params,
@@ -762,9 +766,6 @@ int mlx5_devlink_params_register(struct devlink *devlink)
if (err)
goto max_uc_list_err;
- if (!mlx5_core_is_mp_slave(dev))
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
-
return 0;
max_uc_list_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index 1c1b62ee84bb..b561107e0df1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -24,6 +24,11 @@ struct mlx5_devlink_trap {
struct list_head list;
};
+struct mlx5_devlink_trap_event_ctx {
+ struct mlx5_trap_ctx *trap;
+ int err;
+};
+
struct mlx5_core_dev;
void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
struct devlink_port *dl_port);
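The mlx5 devlink hunks wrap the trap pointer in a small context struct so a notifier callback can both veto the change (NOTIFY_BAD) and report a precise errno through ctx->err. A hedged sketch of carrying an error back through a notifier-style chain:

#include <stdio.h>
#include <errno.h>

#define NOTIFY_OK	0
#define NOTIFY_BAD	1

struct trap_event_ctx {
	int action;	/* stands in for struct mlx5_trap_ctx */
	int err;	/* filled by the callback on failure */
};

static int trap_callback(struct trap_event_ctx *ctx)
{
	if (ctx->action < 0) {
		ctx->err = -EINVAL;	/* precise reason for the veto */
		return NOTIFY_BAD;
	}
	return NOTIFY_OK;
}

int main(void)
{
	struct trap_event_ctx ctx = { .action = -1, .err = 0 };

	if (trap_callback(&ctx) == NOTIFY_BAD)
		printf("change rejected, err=%d\n", ctx.err);
	return 0;
}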
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 21831386b26e..f40497823e65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
MLX5_GET(mtrc_cap, out, num_string_trace);
tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+ tracer->str_db.loaded = false;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
@@ -233,6 +234,8 @@ static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer)
int i;
for (i = 0; i < num_string_db; i++) {
+ if (!string_db_size_out[i])
+ continue;
tracer->str_db.buffer[i] = kzalloc(string_db_size_out[i], GFP_KERNEL);
if (!tracer->str_db.buffer[i])
goto free_strings_db;
@@ -278,6 +281,8 @@ static void mlx5_tracer_read_strings_db(struct work_struct *work)
}
for (i = 0; i < num_string_db; i++) {
+ if (!tracer->str_db.size_out[i])
+ continue;
offset = 0;
MLX5_SET(mtrc_stdb, in, string_db_index, i);
num_of_reads = tracer->str_db.size_out[i] /
@@ -384,6 +389,8 @@ static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer
str_ptr = tracer_event->string_event.string_param;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ if (!tracer->str_db.size_out[i])
+ continue;
if (str_ptr > tracer->str_db.base_address_out[i] &&
str_ptr < tracer->str_db.base_address_out[i] +
tracer->str_db.size_out[i]) {
@@ -459,6 +466,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer,
tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id);
tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost);
+ tracer_event->out = trace;
switch (tracer_event->event_id) {
case TRACER_EVENT_TYPE_TIMESTAMP:
@@ -581,6 +589,26 @@ void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
mlx5_tracer_clean_message(str_frmt);
}
+static int mlx5_tracer_handle_raw_string(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_string_format *cur_string;
+
+ cur_string = mlx5_tracer_message_insert(tracer, tracer_event);
+ if (!cur_string)
+ return -1;
+
+ cur_string->event_id = tracer_event->event_id;
+ cur_string->timestamp = tracer_event->string_event.timestamp;
+ cur_string->lost = tracer_event->lost_event;
+ cur_string->string = "0x%08x%08x";
+ cur_string->num_of_params = 2;
+ cur_string->params[0] = upper_32_bits(*tracer_event->out);
+ cur_string->params[1] = lower_32_bits(*tracer_event->out);
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ return 0;
+}
+
static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
struct tracer_event *tracer_event)
{
@@ -589,7 +617,7 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
if (tracer_event->string_event.tdsn == 0) {
cur_string = mlx5_tracer_get_string(tracer, tracer_event);
if (!cur_string)
- return -1;
+ return mlx5_tracer_handle_raw_string(tracer, tracer_event);
cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string);
cur_string->last_param_num = 0;
@@ -602,9 +630,9 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
} else {
cur_string = mlx5_tracer_message_get(tracer, tracer_event);
if (!cur_string) {
- pr_debug("%s Got string event for unknown string tdsm: %d\n",
+ pr_debug("%s Got string event for unknown string tmsn: %d\n",
__func__, tracer_event->string_event.tmsn);
- return -1;
+ return mlx5_tracer_handle_raw_string(tracer, tracer_event);
}
cur_string->last_param_num += 1;
if (cur_string->last_param_num > TRACER_MAX_PARAMS) {
@@ -756,6 +784,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
if (err)
mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
+ tracer->buff.consumer_index = 0;
return err;
}
@@ -820,7 +849,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
if (tracer->owner) {
tracer->owner = false;
- tracer->buff.consumer_index = 0;
return;
}
@@ -930,6 +958,14 @@ unlock:
return err;
}
+static void mlx5_fw_tracer_update_db(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer =
+ container_of(work, struct mlx5_fw_tracer, update_db_work);
+
+ mlx5_fw_tracer_reload(tracer);
+}
+
/* Create software resources (Buffers, etc ..) */
struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
{
@@ -957,6 +993,8 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change);
INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db);
INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces);
+ INIT_WORK(&tracer->update_db_work, mlx5_fw_tracer_update_db);
+ mutex_init(&tracer->state_lock);
err = mlx5_query_mtrc_caps(tracer);
@@ -1003,11 +1041,15 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
if (IS_ERR_OR_NULL(tracer))
return 0;
- dev = tracer->dev;
-
if (!tracer->str_db.loaded)
queue_work(tracer->work_queue, &tracer->read_fw_strings_work);
+ mutex_lock(&tracer->state_lock);
+ if (test_and_set_bit(MLX5_TRACER_STATE_UP, &tracer->state))
+ goto unlock;
+
+ dev = tracer->dev;
+
err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
if (err) {
mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
@@ -1028,6 +1070,8 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
goto err_notifier_unregister;
}
+unlock:
+ mutex_unlock(&tracer->state_lock);
return 0;
err_notifier_unregister:
@@ -1037,6 +1081,7 @@ err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
err_cancel_work:
cancel_work_sync(&tracer->read_fw_strings_work);
+ mutex_unlock(&tracer->state_lock);
return err;
}
@@ -1046,17 +1091,27 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
if (IS_ERR_OR_NULL(tracer))
return;
+ mutex_lock(&tracer->state_lock);
+ if (!test_and_clear_bit(MLX5_TRACER_STATE_UP, &tracer->state))
+ goto unlock;
+
mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
tracer->owner);
mlx5_eq_notifier_unregister(tracer->dev, &tracer->nb);
cancel_work_sync(&tracer->ownership_change_work);
cancel_work_sync(&tracer->handle_traces_work);
+ /* It is valid to get here from update_db_work. Hence, don't wait for
* update_db_work to finish.
+ */
+ cancel_work(&tracer->update_db_work);
if (tracer->owner)
mlx5_fw_tracer_ownership_release(tracer);
mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey);
mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
+unlock:
+ mutex_unlock(&tracer->state_lock);
}
/* Free software resources (Buffers, etc ..) */
@@ -1073,6 +1128,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
+ mutex_destroy(&tracer->state_lock);
destroy_workqueue(tracer->work_queue);
kvfree(tracer);
}
@@ -1082,6 +1138,8 @@ static int mlx5_fw_tracer_recreate_strings_db(struct mlx5_fw_tracer *tracer)
struct mlx5_core_dev *dev;
int err;
+ if (test_and_set_bit(MLX5_TRACER_RECREATE_DB, &tracer->state))
+ return 0;
cancel_work_sync(&tracer->read_fw_strings_work);
mlx5_fw_tracer_clean_ready_list(tracer);
mlx5_fw_tracer_clean_print_hash(tracer);
@@ -1092,17 +1150,18 @@ static int mlx5_fw_tracer_recreate_strings_db(struct mlx5_fw_tracer *tracer)
err = mlx5_query_mtrc_caps(tracer);
if (err) {
mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err);
- return err;
+ goto out;
}
err = mlx5_fw_tracer_allocate_strings_db(tracer);
if (err) {
mlx5_core_warn(dev, "FWTracer: Allocate strings DB failed %d\n", err);
- return err;
+ goto out;
}
mlx5_fw_tracer_init_saved_traces_array(tracer);
-
- return 0;
+out:
+ clear_bit(MLX5_TRACER_RECREATE_DB, &tracer->state);
+ return err;
}
int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer)
@@ -1142,6 +1201,9 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
queue_work(tracer->work_queue, &tracer->handle_traces_work);
break;
+ case MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE:
+ queue_work(tracer->work_queue, &tracer->update_db_work);
+ break;
default:
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
eqe->sub_type);
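The fw_tracer hunks make init/cleanup idempotent and safe against the new update_db_work: a state bit flipped with test_and_set_bit/test_and_clear_bit under a mutex decides whether there is anything to do. A hedged single-threaded sketch of the guard (the kernel versions use the atomic bitops):

#include <stdio.h>
#include <stdbool.h>

struct tracer {
	bool up;	/* stands in for the MLX5_TRACER_STATE_UP bit */
};

static int tracer_init(struct tracer *t)
{
	if (t->up)			/* already up: second init is a no-op */
		return 0;
	t->up = true;
	printf("alloc PD, create mkey, start tracer\n");
	return 0;
}

static void tracer_cleanup(struct tracer *t)
{
	if (!t->up)			/* never started: nothing to release */
		return;
	t->up = false;
	printf("release ownership, destroy mkey, dealloc PD\n");
}

int main(void)
{
	struct tracer t = { 0 };

	tracer_init(&t);
	tracer_cleanup(&t);
	tracer_cleanup(&t);		/* safe double cleanup */
	return 0;
}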
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 4762b55b0b0e..5c548bb74f07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -63,6 +63,11 @@ struct mlx5_fw_trace_data {
char msg[TRACE_STR_MSG];
};
+enum mlx5_fw_tracer_state {
+ MLX5_TRACER_STATE_UP = BIT(0),
+ MLX5_TRACER_RECREATE_DB = BIT(1),
+};
+
struct mlx5_fw_tracer {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -104,6 +109,9 @@ struct mlx5_fw_tracer {
struct work_struct handle_traces_work;
struct hlist_head hash[MESSAGE_HASH_SIZE];
struct list_head ready_strings_list;
+ struct work_struct update_db_work;
+ struct mutex state_lock; /* Synchronize update work with reload flows */
+ unsigned long state;
};
struct tracer_string_format {
@@ -158,6 +166,7 @@ struct tracer_event {
struct tracer_string_event string_event;
struct tracer_timestamp_event timestamp_event;
};
+ u64 *out;
};
struct mlx5_ifc_tracer_event_bits {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index b70e36025d92..9a3878f9e582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -95,7 +95,7 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
mlx5_host_pf_cleanup(dev);
- err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
+ err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 6f8723cc6874..125c7cb7d839 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -454,6 +454,7 @@ struct mlx5e_txqsq {
struct mlx5_clock *clock;
struct net_device *netdev;
struct mlx5_core_dev *mdev;
+ struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
/* control path */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 57f4b1b50421..7ac1ad9c46de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -314,11 +314,11 @@ static int port_set_buffer(struct mlx5e_priv *priv,
err = port_update_shared_buffer(priv->mdev, current_headroom_size,
new_headroom_size);
if (err)
- return err;
+ goto out;
err = port_update_pool_cfg(priv->mdev, port_buffer);
if (err)
- return err;
+ goto out;
err = mlx5e_port_set_pbmc(mdev, in);
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 8469e9c38670..9a1bc93b7dc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -771,8 +771,8 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
- mlx5e_trigger_napi_sched(&c->napi);
}
+ mlx5e_trigger_napi_sched(&c->napi);
}
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 8099a21e674c..ce85b48d327d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -438,10 +438,6 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
- /* only handle the event on native eswtich of representor */
- if (!mlx5_esw_bridge_is_local(dev, rep, esw))
- break;
-
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index b195dbbf6c90..41e356d9d785 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -81,6 +81,10 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
mlx5e_activate_txqsq(sq);
+ if (sq->channel)
+ mlx5e_trigger_napi_icosq(sq->channel);
+ else
+ mlx5e_trigger_napi_sched(sq->cq.napi);
return 0;
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 78c427b38048..c095a12346de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -232,9 +232,9 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->ifindexes[if_count] = out_dev->ifindex;
parse_state->if_count++;
is_uplink_rep = mlx5e_eswitch_uplink_rep(out_dev);
- err = mlx5_lag_do_mirred(priv->mdev, out_dev);
- if (err)
- return err;
+
+ if (mlx5_lag_mpesw_do_mirred(priv->mdev, out_dev, extack))
+ return -EOPNOTSUPP;
out_dev = get_fdb_out_dev(uplink_dev, out_dev);
if (!out_dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index b86ac604d0c2..2e0d88b513aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -44,19 +44,17 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, vlan_idx)) {
+ NL_SET_ERR_MSG_MOD(extack, "firmware vlan actions is not supported");
+ return -EOPNOTSUPP;
+ }
+
switch (act->id) {
case FLOW_ACTION_VLAN_POP:
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported");
- return -EOPNOTSUPP;
- }
-
+ if (vlan_idx)
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
- } else {
+ else
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- }
break;
case FLOW_ACTION_VLAN_PUSH:
attr->vlan_vid[vlan_idx] = act->vlan.vid;
@@ -65,25 +63,10 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
if (!attr->vlan_proto[vlan_idx])
attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan push action is not supported for vlan depth > 1");
- return -EOPNOTSUPP;
- }
-
+ if (vlan_idx)
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
- } else {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
- (act->vlan.proto != htons(ETH_P_8021Q) ||
- act->vlan.prio)) {
- NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported");
- return -EOPNOTSUPP;
- }
-
+ else
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- }
break;
case FLOW_ACTION_VLAN_POP_ETH:
parse_state->eth_pop = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 7758a425bfa8..8218c892b161 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -204,13 +204,15 @@ mlx5e_flow_meter_create_aso_obj(struct mlx5e_flow_meters *flow_meters, int *obj_
u32 in[MLX5_ST_SZ_DW(create_flow_meter_aso_obj_in)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct mlx5_core_dev *mdev = flow_meters->mdev;
- void *obj;
+ void *obj, *param;
int err;
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
- MLX5_SET(general_obj_in_cmd_hdr, in, log_obj_range, flow_meters->log_granularity);
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_create_param, param, log_obj_range,
+ flow_meters->log_granularity);
obj = MLX5_ADDR_OF(create_flow_meter_aso_obj_in, in, flow_meter_aso_obj);
MLX5_SET(flow_meter_aso_obj, obj, meter_aso_access_pd, flow_meters->pdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 313df8232db7..193562c14c44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1073,12 +1073,16 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
struct flow_action_entry *meta_action;
unsigned long cookie = flow->cookie;
+ enum ip_conntrack_info ctinfo;
struct mlx5_ct_entry *entry;
int err;
meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
if (!meta_action)
return -EOPNOTSUPP;
+ ctinfo = meta_action->ct_metadata.cookie & NFCT_INFOMASK;
+ if (ctinfo == IP_CT_NEW)
+ return -EOPNOTSUPP;
spin_lock_bh(&ct_priv->ht_lock);
entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index e84c3400ba1d..7b0d3de0ec6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -158,6 +158,11 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->family = x->props.family;
attrs->type = x->xso.type;
attrs->reqid = x->props.reqid;
+ attrs->upspec.dport = ntohs(x->sel.dport);
+ attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
+ attrs->upspec.sport = ntohs(x->sel.sport);
+ attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
+ attrs->upspec.proto = x->sel.proto;
mlx5e_ipsec_init_limits(sa_entry, attrs);
}
@@ -221,6 +226,13 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
return -EINVAL;
}
+
+ if (x->sel.proto != IPPROTO_IP &&
+ (x->sel.proto != IPPROTO_UDP || x->xso.dir != XFRM_DEV_OFFLOAD_OUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
+ return -EINVAL;
+ }
+
switch (x->xso.type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
@@ -517,6 +529,12 @@ static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x,
return -EINVAL;
}
+ if (x->selector.proto != IPPROTO_IP &&
+ (x->selector.proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -537,6 +555,11 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
attrs->action = x->action;
attrs->type = XFRM_DEV_OFFLOAD_PACKET;
attrs->reqid = x->xfrm_vec[0].reqid;
+ attrs->upspec.dport = ntohs(sel->dport);
+ attrs->upspec.dport_mask = ntohs(sel->dport_mask);
+ attrs->upspec.sport = ntohs(sel->sport);
+ attrs->upspec.sport_mask = ntohs(sel->sport_mask);
+ attrs->upspec.proto = sel->proto;
}
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 8bed9c361075..b387adca9c20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -52,6 +52,14 @@ struct aes_gcm_keymat {
u32 aes_key[256 / 32];
};
+struct upspec {
+ u16 dport;
+ u16 dport_mask;
+ u16 sport;
+ u16 sport_mask;
+ u8 proto;
+};
+
struct mlx5_accel_esp_xfrm_attrs {
u32 esn;
u32 spi;
@@ -68,6 +76,7 @@ struct mlx5_accel_esp_xfrm_attrs {
__be32 a6[4];
} daddr;
+ struct upspec upspec;
u8 dir : 2;
u8 esn_overlap : 1;
u8 esn_trigger : 1;
@@ -181,6 +190,7 @@ struct mlx5_accel_pol_xfrm_attrs {
__be32 a6[4];
} daddr;
+ struct upspec upspec;
u8 family;
u8 action;
u8 type : 2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 9f19f4b59a70..5da6fe68eea6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -467,6 +467,27 @@ static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
misc_parameters_2.metadata_reg_c_0, reqid);
}
+static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
+{
+ if (upspec->proto != IPPROTO_UDP)
+ return;
+
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
+ if (upspec->dport) {
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
+ upspec->dport_mask);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->dport);
+ }
+
+ if (upspec->sport) {
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_sport,
+ upspec->sport_mask);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_sport, upspec->sport);
+ }
+}
+
static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
struct mlx5_flow_act *flow_act)
{
@@ -654,6 +675,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
@@ -728,6 +750,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT,
&flow_act);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 2461462b7b99..7fb3835befbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -4,7 +4,7 @@
#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
-#include "lib/mlx5.h"
+#include "lib/crypto.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
@@ -92,7 +92,6 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
lower_32_bits(attrs->hard_packet_limit));
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
- MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
}
if (attrs->soft_packet_limit != XFRM_INF) {
@@ -329,8 +328,7 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
if (attrs->soft_packet_limit != XFRM_INF)
if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
- !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
- !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
+ !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm))
xfrm_state_check_expire(sa_entry->x);
unlock:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index eb5b09f81dec..cf704f106b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -4,16 +4,16 @@
#include <linux/debugfs.h>
#include "en.h"
#include "lib/mlx5.h"
+#include "lib/crypto.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id)
+struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct tls_crypto_info *crypto_info)
{
+ const void *key;
u32 sz_bytes;
- void *key;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
@@ -33,17 +33,16 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
break;
}
default:
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
- return mlx5_create_encryption_key(mdev, key, sz_bytes,
- MLX5_ACCEL_OBJ_TLS_KEY,
- p_key_id);
+ return mlx5_crypto_dek_create(dek_pool, key, sz_bytes);
}
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
+void mlx5_ktls_destroy_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek)
{
- mlx5_destroy_encryption_key(mdev, key_id);
+ mlx5_crypto_dek_destroy(dek_pool, dek);
}
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
@@ -189,6 +188,7 @@ static void mlx5e_tls_debugfs_init(struct mlx5e_tls *tls,
int mlx5e_ktls_init(struct mlx5e_priv *priv)
{
+ struct mlx5_crypto_dek_pool *dek_pool;
struct mlx5e_tls *tls;
if (!mlx5e_is_ktls_device(priv->mdev))
@@ -197,9 +197,15 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv)
tls = kzalloc(sizeof(*tls), GFP_KERNEL);
if (!tls)
return -ENOMEM;
+ tls->mdev = priv->mdev;
+ dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY);
+ if (IS_ERR(dek_pool)) {
+ kfree(tls);
+ return PTR_ERR(dek_pool);
+ }
+ tls->dek_pool = dek_pool;
priv->tls = tls;
- priv->tls->mdev = priv->mdev;
mlx5e_tls_debugfs_init(tls, priv->dfs_root);
@@ -216,6 +222,7 @@ void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
debugfs_remove_recursive(tls->debugfs.dfs);
tls->debugfs.dfs = NULL;
+ mlx5_crypto_dek_pool_destroy(tls->dek_pool);
kfree(priv->tls);
priv->tls = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index fccf995ee16d..f11075e67658 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -10,10 +10,12 @@
#include "en.h"
#ifdef CONFIG_MLX5_EN_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id);
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
+#include "lib/crypto.h"
+
+struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct tls_crypto_info *crypto_info);
+void mlx5_ktls_destroy_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek);
static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
{
@@ -83,6 +85,7 @@ struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
struct workqueue_struct *rx_wq;
struct mlx5e_tls_tx_pool *tx_pool;
+ struct mlx5_crypto_dek_pool *dek_pool;
struct mlx5e_tls_debugfs debugfs;
};
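The kTLS hunks change key creation from an out-parameter u32 to returning a struct mlx5_crypto_dek pointer, so failure travels as an encoded errno in the pointer itself. A hedged userspace sketch of the ERR_PTR/IS_ERR/PTR_ERR convention the callers now follow, with minimal stand-ins for the kernel helpers:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* minimal stand-ins for the kernel's ERR_PTR helpers */
static void *err_ptr(long err) { return (void *)err; }
static int is_err(const void *p) { return (unsigned long)p >= (unsigned long)-4095; }
static long ptr_err(const void *p) { return (long)p; }

struct dek { int id; };

static struct dek *dek_create(int fail)
{
	struct dek *d;

	if (fail)
		return err_ptr(-EINVAL);	/* errno rides in the pointer */
	d = malloc(sizeof(*d));
	if (!d)
		return err_ptr(-ENOMEM);
	d->id = 42;
	return d;
}

int main(void)
{
	struct dek *d = dek_create(0);

	if (is_err(d)) {
		printf("create failed: %ld\n", ptr_err(d));
		return 1;
	}
	printf("dek id %d\n", d->id);
	free(d);
	return 0;
}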
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 3e54834747ce..4be770443b0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -50,7 +50,7 @@ struct mlx5e_ktls_offload_context_rx {
struct mlx5e_tls_sw_stats *sw_stats;
struct completion add_ctx;
struct mlx5e_tir tir;
- u32 key_id;
+ struct mlx5_crypto_dek *dek;
u32 rxq;
DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
@@ -148,7 +148,8 @@ post_static_params(struct mlx5e_icosq *sq,
wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
mlx5e_tir_get_tirn(&priv_rx->tir),
- priv_rx->key_id, priv_rx->resync.seq, false,
+ mlx5_crypto_dek_get_id(priv_rx->dek),
+ priv_rx->resync.seq, false,
TLS_OFFLOAD_CTX_DIR_RX);
wi = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
@@ -610,20 +611,22 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
struct mlx5e_ktls_offload_context_rx *priv_rx;
struct mlx5e_ktls_rx_resync_ctx *resync;
struct tls_context *tls_ctx;
- struct mlx5_core_dev *mdev;
+ struct mlx5_crypto_dek *dek;
struct mlx5e_priv *priv;
int rxq, err;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
if (unlikely(!priv_rx))
return -ENOMEM;
- err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
- if (err)
+ dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+ if (IS_ERR(dek)) {
+ err = PTR_ERR(dek);
goto err_create_key;
+ }
+ priv_rx->dek = dek;
INIT_LIST_HEAD(&priv_rx->list);
spin_lock_init(&priv_rx->lock);
@@ -673,7 +676,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
err_post_wqes:
mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
- mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
+ mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
err_create_key:
kfree(priv_rx);
return err;
@@ -683,11 +686,9 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
struct mlx5e_ktls_rx_resync_ctx *resync;
- struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
priv = netdev_priv(netdev);
- mdev = priv->mdev;
priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
@@ -707,7 +708,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
mlx5e_tir_destroy(&priv_rx->tir);
- mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
+ mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
/* priv_rx should normally be freed here, but if there is an outstanding
* GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
* processed.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 6db27062b765..60b3e08a1028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -98,7 +98,7 @@ struct mlx5e_ktls_offload_context_tx {
struct tls_offload_context_tx *tx_ctx;
struct mlx5_core_dev *mdev;
struct mlx5e_tls_sw_stats *sw_stats;
- u32 key_id;
+ struct mlx5_crypto_dek *dek;
u8 create_err : 1;
};
@@ -457,6 +457,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_tls_tx_pool *pool;
struct tls_context *tls_ctx;
+ struct mlx5_crypto_dek *dek;
struct mlx5e_priv *priv;
int err;
@@ -468,9 +469,12 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
if (IS_ERR(priv_tx))
return PTR_ERR(priv_tx);
- err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
- if (err)
+ dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+ if (IS_ERR(dek)) {
+ err = PTR_ERR(dek);
goto err_create_key;
+ }
+ priv_tx->dek = dek;
priv_tx->expected_seq = start_offload_tcp_sn;
switch (crypto_info->cipher_type) {
@@ -512,7 +516,7 @@ void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
pool = priv->tls->tx_pool;
atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
- mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
+ mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_tx->dek);
pool_push(pool, priv_tx);
}
@@ -551,8 +555,9 @@ post_static_params(struct mlx5e_txqsq *sq,
pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
- priv_tx->tisn, priv_tx->key_id, 0, fence,
- TLS_OFFLOAD_CTX_DIR_TX);
+ priv_tx->tisn,
+ mlx5_crypto_dek_get_id(priv_tx->dek),
+ 0, fence, TLS_OFFLOAD_CTX_DIR_TX);
tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
sq->pc += num_wqebbs;
}
@@ -894,8 +899,6 @@ static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls,
return;
tls->debugfs.dfs_tx = debugfs_create_dir("tx", dfs_root);
- if (!tls->debugfs.dfs_tx)
- return;
debugfs_create_size_t("pool_size", 0400, tls->debugfs.dfs_tx,
&tls->tx_pool->size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 7f6b940830b3..08d0929e8260 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -7,7 +7,7 @@
#include "en.h"
#include "lib/aso.h"
-#include "lib/mlx5.h"
+#include "lib/crypto.h"
#include "en_accel/macsec.h"
#include "en_accel/macsec_fs.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 68f19324db93..4c9a3210600c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -31,6 +31,7 @@
*/
#include "en.h"
+#include "lib/crypto.h"
/* mlx5e global resources should be placed in this file.
* Global resources are common to all the netdevices created on the same nic.
@@ -104,6 +105,13 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock);
+ mdev->mlx5e_res.dek_priv = mlx5_crypto_dek_init(mdev);
+ if (IS_ERR(mdev->mlx5e_res.dek_priv)) {
+ mlx5_core_err(mdev, "crypto dek init failed, %ld\n",
+ PTR_ERR(mdev->mlx5e_res.dek_priv));
+ mdev->mlx5e_res.dek_priv = NULL;
+ }
+
return 0;
err_destroy_mkey:
@@ -119,6 +127,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
+ mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
+ mdev->mlx5e_res.dek_priv = NULL;
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 7298fe782e9e..05796f8b1d7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -450,7 +450,7 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
- if (fs->vlan->cvlan_filter_disabled)
+ if (!fs->vlan || fs->vlan->cvlan_filter_disabled)
return;
fs->vlan->cvlan_filter_disabled = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e4996ef04d86..ec81d935262f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,7 @@
#include <linux/if_bridge.h>
#include <linux/filter.h>
#include <net/page_pool.h>
+#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
@@ -180,17 +181,21 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
+ struct mlx5_devlink_trap_event_ctx *trap_event_ctx = data;
int err;
switch (event) {
case MLX5_DRIVER_EVENT_TYPE_TRAP:
- err = mlx5e_handle_trap_event(priv, data);
+ err = mlx5e_handle_trap_event(priv, trap_event_ctx->trap);
+ if (err) {
+ trap_event_ctx->err = err;
+ return NOTIFY_BAD;
+ }
break;
default:
- netdev_warn(priv->netdev, "Sync event: Unknown event %ld\n", event);
- err = -EINVAL;
+ return NOTIFY_DONE;
}
- return err;
+ return NOTIFY_OK;
}
static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
@@ -592,7 +597,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->ix = c->ix;
rq->channel = c;
rq->mdev = mdev;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->hw_mtu =
+ MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
rq->xdpsq = &c->rq_xdpsq;
rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
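/* Arithmetic note for the hw_mtu line above: ETH_FCS_LEN is 4 and
 * '!params->scatter_fcs_en' evaluates to 1 only when FCS scattering is
 * off, so hw_mtu shrinks by exactly 4 bytes in that case and is left
 * unchanged when the FCS is scattered into the buffer.
 */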
@@ -1015,35 +1021,6 @@ int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
return mlx5e_rq_to_ready(rq, curr_state);
}
-static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
-{
- struct mlx5_core_dev *mdev = rq->mdev;
-
- void *in;
- void *rqc;
- int inlen;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
-
- MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
- MLX5_SET64(modify_rq_in, in, modify_bitmask,
- MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
- MLX5_SET(rqc, rqc, scatter_fcs, enable);
- MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
-
- err = mlx5_core_modify_rq(mdev, rq->rqn, in);
-
- kvfree(in);
-
- return err;
-}
-
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -1470,6 +1447,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->mdev = c->mdev;
+ sq->channel = c;
sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
@@ -2482,8 +2460,6 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_xsk(c);
else
mlx5e_activate_rq(&c->rq);
-
- mlx5e_trigger_napi_icosq(c);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2575,13 +2551,19 @@ err_free:
return err;
}
-static void mlx5e_activate_channels(struct mlx5e_channels *chs)
+static void mlx5e_activate_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
int i;
for (i = 0; i < chs->num; i++)
mlx5e_activate_channel(chs->c[i]);
+ if (priv->htb)
+ mlx5e_qos_activate_queues(priv);
+
+ for (i = 0; i < chs->num; i++)
+ mlx5e_trigger_napi_icosq(chs->c[i]);
+
if (chs->ptp)
mlx5e_ptp_activate_channel(chs->ptp);
}
@@ -2888,9 +2870,7 @@ out:
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_build_txq_maps(priv);
- mlx5e_activate_channels(&priv->channels);
- if (priv->htb)
- mlx5e_qos_activate_queues(priv);
+ mlx5e_activate_channels(priv, &priv->channels);
mlx5e_xdp_tx_enable(priv);
/* dev_watchdog() wants all TX queues to be started when the carrier is
@@ -2998,32 +2978,37 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
mlx5e_fp_preactivate preactivate,
void *context, bool reset)
{
- struct mlx5e_channels new_chs = {};
+ struct mlx5e_channels *new_chs;
int err;
reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
if (!reset)
return mlx5e_switch_priv_params(priv, params, preactivate, context);
- new_chs.params = *params;
+ new_chs = kzalloc(sizeof(*new_chs), GFP_KERNEL);
+ if (!new_chs)
+ return -ENOMEM;
+ new_chs->params = *params;
- mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
+ mlx5e_selq_prepare_params(&priv->selq, &new_chs->params);
- err = mlx5e_open_channels(priv, &new_chs);
+ err = mlx5e_open_channels(priv, new_chs);
if (err)
goto err_cancel_selq;
- err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
+ err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
if (err)
goto err_close;
+ kfree(new_chs);
return 0;
err_close:
- mlx5e_close_channels(&new_chs);
+ mlx5e_close_channels(new_chs);
err_cancel_selq:
mlx5e_selq_cancel(&priv->selq);
+ kfree(new_chs);
return err;
}
@@ -3315,20 +3300,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tises(priv);
}
-static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
-{
- int err = 0;
- int i;
-
- for (i = 0; i < chs->num; i++) {
- err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
int err;
@@ -3904,41 +3875,27 @@ static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
return mlx5_set_ports_check(mdev, in, sizeof(in));
}
+static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool enable = *(bool *)ctx;
+
+ return mlx5e_set_rx_port_ts(mdev, enable);
+}
+
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels *chs = &priv->channels;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_params new_params;
int err;
mutex_lock(&priv->state_lock);
- if (enable) {
- err = mlx5e_set_rx_port_ts(mdev, false);
- if (err)
- goto out;
-
- chs->params.scatter_fcs_en = true;
- err = mlx5e_modify_channels_scatter_fcs(chs, true);
- if (err) {
- chs->params.scatter_fcs_en = false;
- mlx5e_set_rx_port_ts(mdev, true);
- }
- } else {
- chs->params.scatter_fcs_en = false;
- err = mlx5e_modify_channels_scatter_fcs(chs, false);
- if (err) {
- chs->params.scatter_fcs_en = true;
- goto out;
- }
- err = mlx5e_set_rx_port_ts(mdev, true);
- if (err) {
- mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
- err = 0;
- }
- }
-
-out:
+ new_params = chs->params;
+ new_params.scatter_fcs_en = enable;
+ err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
+ &new_params.scatter_fcs_en, true);
mutex_unlock(&priv->state_lock);
return err;
}
@@ -4075,6 +4032,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_GRO_HW)
netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+
return features;
}
@@ -5986,7 +5947,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
}
mlx5e_dcbnl_init_app(priv);
- mlx5_uplink_netdev_set(mdev, netdev);
+ mlx5_core_uplink_netdev_set(mdev, netdev);
mlx5e_params_print_info(mdev, &priv->channels.params);
return 0;
@@ -6010,6 +5971,7 @@ static void mlx5e_remove(struct auxiliary_device *adev)
struct mlx5e_priv *priv = mlx5e_dev->priv;
pm_message_t state = {};
+ mlx5_core_uplink_netdev_set(priv->mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
mlx5e_suspend(adev, state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 4e6f5caf8ab6..e2ec80ebde58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1124,8 +1124,6 @@ static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
return;
tc->dfs_root = debugfs_create_dir("tc", dfs_root);
- if (!tc->dfs_root)
- return;
debugfs_create_file("hairpin_num_queues", 0644, tc->dfs_root,
&tc->hairpin_params, &fops_hairpin_queues);
@@ -1884,7 +1882,6 @@ post_process_attr(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
bool vf_tun;
int err = 0;
@@ -1896,12 +1893,6 @@ post_process_attr(struct mlx5e_tc_flow *flow,
if (err)
goto err_out;
- if (mlx5e_is_eswitch_flow(flow)) {
- err = mlx5_eswitch_add_vlan_action(esw, attr);
- if (err)
- goto err_out;
- }
-
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
if (err)
@@ -2105,8 +2096,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (mlx5_flow_has_geneve_opt(flow))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
- mlx5_eswitch_del_vlan_action(esw, attr);
-
if (flow->decap_route)
mlx5e_detach_decap_route(priv, flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 9b44557e7271..66ec7932f008 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -817,9 +817,12 @@ static void comp_irqs_release(struct mlx5_core_dev *dev)
static int comp_irqs_request(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ const struct cpumask *prev = cpu_none_mask;
+ const struct cpumask *mask;
int ncomp_eqs = table->num_comp_eqs;
u16 *cpus;
int ret;
+ int cpu;
int i;
ncomp_eqs = table->num_comp_eqs;
@@ -838,8 +841,19 @@ static int comp_irqs_request(struct mlx5_core_dev *dev)
ret = -ENOMEM;
goto free_irqs;
}
- for (i = 0; i < ncomp_eqs; i++)
- cpus[i] = cpumask_local_spread(i, dev->priv.numa_node);
+
+ i = 0;
+ rcu_read_lock();
+ for_each_numa_hop_mask(mask, dev->priv.numa_node) {
+ for_each_cpu_andnot(cpu, mask, prev) {
+ cpus[i] = cpu;
+ if (++i == ncomp_eqs)
+ goto spread_done;
+ }
+ prev = mask;
+ }
+spread_done:
+ rcu_read_unlock();
ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs);
kfree(cpus);
if (ret < 0)
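/* Spreading order produced by the loop above: CPUs of the device's own
 * NUMA node first, then each successively more distant hop, each CPU
 * visited once. A hypothetical standalone helper under the same
 * assumptions (RCU protects the hop masks):
 */
static int example_spread_comp_irqs(u16 *cpus, int ncomp_eqs, int node)
{
	const struct cpumask *prev = cpu_none_mask;
	const struct cpumask *mask;
	int cpu, i = 0;

	rcu_read_lock();
	for_each_numa_hop_mask(mask, node) {
		/* take only the CPUs this hop adds on top of prev */
		for_each_cpu_andnot(cpu, mask, prev) {
			cpus[i] = cpu;
			if (++i == ncomp_eqs)
				goto done;
		}
		prev = mask;
	}
done:
	rcu_read_unlock();
	return i; /* number of CPUs actually assigned */
}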
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index b176648d1343..3cdcb0e0b20f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1715,7 +1715,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
struct mlx5_esw_bridge *bridge;
port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
- if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
+ if (!port)
return;
bridge = port->bridge;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 5b5a215a7dc5..fd03f076551b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -222,7 +222,6 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_handle **send_to_vport_meta_rules;
struct mlx5_flow_handle *miss_rule_uni;
struct mlx5_flow_handle *miss_rule_multi;
- int vlan_push_pop_refcount;
struct mlx5_fs_chains *esw_chains_priv;
struct {
@@ -520,10 +519,6 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
-int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr);
-int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 3a82e385544d..8fb09143e9e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -579,16 +579,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
+ return ERR_PTR(-EOPNOTSUPP);
+
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
flow_act.action = attr->action;
- /* if per flow vlan pop/push is emulated, don't set that into the firmware */
- if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
- MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
@@ -829,204 +829,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
-static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
-{
- struct mlx5_eswitch_rep *rep;
- unsigned long i;
- int err = 0;
-
- esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
- mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
- if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
- continue;
-
- err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
- if (err)
- goto out;
- }
-
-out:
- return err;
-}
-
-static struct mlx5_eswitch_rep *
-esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
-{
- struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
-
- in_rep = attr->in_rep;
- out_rep = attr->dests[0].rep;
-
- if (push)
- vport = in_rep;
- else if (pop)
- vport = out_rep;
- else
- vport = in_rep;
-
- return vport;
-}
-
-static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
- bool push, bool pop, bool fwd)
-{
- struct mlx5_eswitch_rep *in_rep, *out_rep;
-
- if ((push || pop) && !fwd)
- goto out_notsupp;
-
- in_rep = attr->in_rep;
- out_rep = attr->dests[0].rep;
-
- if (push && in_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- /* vport has vlan push configured, can't offload VF --> wire rules w.o it */
- if (!push && !pop && fwd)
- if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- /* protects against (1) setting rules with different vlans to push and
- * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
- */
- if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
- goto out_notsupp;
-
- return 0;
-
-out_notsupp:
- return -EOPNOTSUPP;
-}
-
-int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr)
-{
- struct offloads_fdb *offloads = &esw->fdb_table.offloads;
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- struct mlx5_eswitch_rep *vport = NULL;
- bool push, pop, fwd;
- int err = 0;
-
- /* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- return 0;
-
- push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
- pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
- !attr->dest_chain);
-
- mutex_lock(&esw->state_lock);
-
- err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
- if (err)
- goto unlock;
-
- attr->flags &= ~MLX5_ATTR_FLAG_VLAN_HANDLED;
-
- vport = esw_vlan_action_get_vport(esw_attr, push, pop);
-
- if (!push && !pop && fwd) {
- /* tracks VF --> wire rules without vlan push action */
- if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
- vport->vlan_refcount++;
- attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
- }
-
- goto unlock;
- }
-
- if (!push && !pop)
- goto unlock;
-
- if (!(offloads->vlan_push_pop_refcount)) {
- /* it's the 1st vlan rule, apply global vlan pop policy */
- err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
- if (err)
- goto out;
- }
- offloads->vlan_push_pop_refcount++;
-
- if (push) {
- if (vport->vlan_refcount)
- goto skip_set_push;
-
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
- 0, SET_VLAN_INSERT | SET_VLAN_STRIP);
- if (err)
- goto out;
- vport->vlan = esw_attr->vlan_vid[0];
-skip_set_push:
- vport->vlan_refcount++;
- }
-out:
- if (!err)
- attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
-unlock:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr)
-{
- struct offloads_fdb *offloads = &esw->fdb_table.offloads;
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- struct mlx5_eswitch_rep *vport = NULL;
- bool push, pop, fwd;
- int err = 0;
-
- /* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- return 0;
-
- if (!(attr->flags & MLX5_ATTR_FLAG_VLAN_HANDLED))
- return 0;
-
- push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
- pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
-
- mutex_lock(&esw->state_lock);
-
- vport = esw_vlan_action_get_vport(esw_attr, push, pop);
-
- if (!push && !pop && fwd) {
- /* tracks VF --> wire rules without vlan push action */
- if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
- vport->vlan_refcount--;
-
- goto out;
- }
-
- if (push) {
- vport->vlan_refcount--;
- if (vport->vlan_refcount)
- goto skip_unset_push;
-
- vport->vlan = 0;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
- 0, 0, SET_VLAN_STRIP);
- if (err)
- goto out;
- }
-
-skip_unset_push:
- offloads->vlan_push_pop_refcount--;
- if (offloads->vlan_push_pop_refcount)
- goto out;
-
- /* no more vlan rules, stop global vlan pop policy */
- err = esw_set_global_vlan_pop(esw, 0);
-
-out:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
struct mlx5_eswitch *from_esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 9459e56ee90a..718cf09c28ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -424,6 +424,7 @@ int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_b
return blocking_notifier_chain_register(&events->sw_nh, nb);
}
+EXPORT_SYMBOL(mlx5_blocking_notifier_register);
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
@@ -431,6 +432,7 @@ int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier
return blocking_notifier_chain_unregister(&events->sw_nh, nb);
}
+EXPORT_SYMBOL(mlx5_blocking_notifier_unregister);
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
void *data)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 32d4c967469c..374c17445e54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -272,8 +272,6 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
unsigned int size;
int err;
- if (ft_attr->max_fte != POOL_NEXT_SIZE)
- size = roundup_pow_of_two(ft_attr->max_fte);
size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
if (!size)
return -ENOSPC;
@@ -412,11 +410,6 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
MLX5_SET(create_flow_group_in, in, table_id, ft->id);
- if (ft->vport) {
- MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
- MLX5_SET(create_flow_group_in, in, other_vport, 1);
- }
-
MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index dd43a940499b..fad479df12e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1776,7 +1776,6 @@ static int build_match_list(struct match_list *match_head,
{
struct rhlist_head *tmp, *list;
struct mlx5_flow_group *g;
- int err = 0;
rcu_read_lock();
INIT_LIST_HEAD(&match_head->list);
@@ -1802,7 +1801,7 @@ static int build_match_list(struct match_list *match_head,
list_add_tail(&curr_match->list, &match_head->list);
}
rcu_read_unlock();
- return err;
+ return 0;
}
static u64 matched_fgs_get_version(struct list_head *match_head)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index f34e758a2f1f..7bb7be01225a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -267,6 +267,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, crypto)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_CRYPTO);
+ if (err)
+ return err;
+ }
+
if (MLX5_CAP_GEN(dev, shampo)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 1da4da564e6d..63290da84010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -371,6 +371,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
mlx5_core_err(dev, "PCI link not ready (0x%04x) after %llu ms\n",
reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
err = -ETIMEDOUT;
+ goto restore;
}
do {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 879555ba847d..1e8bee906c31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -62,7 +62,7 @@ enum {
};
enum {
- MLX5_DROP_NEW_HEALTH_WORK,
+ MLX5_DROP_HEALTH_WORK,
};
enum {
@@ -675,7 +675,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
devlink = priv_to_devlink(dev);
mutex_lock(&dev->intf_state_mutex);
- if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
+ if (test_bit(MLX5_DROP_HEALTH_WORK, &health->flags)) {
mlx5_core_err(dev, "health works are not permitted at this stage\n");
mutex_unlock(&dev->intf_state_mutex);
return;
@@ -771,14 +771,8 @@ static unsigned long get_next_poll_jiffies(struct mlx5_core_dev *dev)
void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long flags;
- spin_lock_irqsave(&health->wq_lock, flags);
- if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
- queue_work(health->wq, &health->fatal_report_work);
- else
- mlx5_core_err(dev, "new health works are not permitted at this stage\n");
- spin_unlock_irqrestore(&health->wq_lock, flags);
+ queue_work(health->wq, &health->fatal_report_work);
}
#define MLX5_MSEC_PER_HOUR (MSEC_PER_SEC * 60 * 60)
@@ -858,7 +852,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
timer_setup(&health->timer, poll_health, 0);
health->fatal_error = MLX5_SENSOR_NO_ERR;
- clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ clear_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -869,13 +863,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long flags;
- if (disable_health) {
- spin_lock_irqsave(&health->wq_lock, flags);
- set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- spin_unlock_irqrestore(&health->wq_lock, flags);
- }
+ if (disable_health)
+ set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
del_timer_sync(&health->timer);
}
@@ -891,11 +881,8 @@ void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long flags;
- spin_lock_irqsave(&health->wq_lock, flags);
- set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- spin_unlock_irqrestore(&health->wq_lock, flags);
+ set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
cancel_delayed_work_sync(&health->update_fw_log_ts_work);
cancel_work_sync(&health->report_work);
cancel_work_sync(&health->fatal_report_work);
@@ -928,7 +915,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
kfree(name);
if (!health->wq)
goto out_err;
- spin_lock_init(&health->wq_lock);
INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index eff92dc0927c..779d92b762d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -172,6 +172,7 @@ enum mlx5_ptys_rate {
MLX5_PTYS_RATE_EDR = 1 << 5,
MLX5_PTYS_RATE_HDR = 1 << 6,
MLX5_PTYS_RATE_NDR = 1 << 7,
+ MLX5_PTYS_RATE_XDR = 1 << 8,
};
static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
@@ -185,20 +186,21 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
case MLX5_PTYS_RATE_EDR: return 25000;
case MLX5_PTYS_RATE_HDR: return 50000;
case MLX5_PTYS_RATE_NDR: return 100000;
+ case MLX5_PTYS_RATE_XDR: return 200000;
default: return -1;
}
}
-static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
+static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
{
int rate, width;
rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper);
if (rate < 0)
- return -EINVAL;
+ return SPEED_UNKNOWN;
width = mlx5_ptys_width_enum_to_int(ib_link_width_oper);
if (width < 0)
- return -EINVAL;
+ return SPEED_UNKNOWN;
return rate * width;
}
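/* Worked example for the helper above: a 4x EDR link reports
 * rate = 25000 (Mb/s per lane) and width = 4, so the product is
 * 100000, i.e. 100 Gb/s; an unrecognized rate or width now yields
 * SPEED_UNKNOWN instead of the old -EINVAL.
 */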
@@ -221,16 +223,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper);
- if (speed < 0)
- return -EINVAL;
+ link_ksettings->base.speed = speed;
+ link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL;
- link_ksettings->base.duplex = DUPLEX_FULL;
link_ksettings->base.port = PORT_OTHER;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
- link_ksettings->base.speed = speed;
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index b8feaf0f5c4c..f4b777d4e108 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -22,7 +22,7 @@ static int type_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
char *mode = NULL;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
mode = get_str_mode_type(ldev);
@@ -41,7 +41,7 @@ static int port_sel_mode_show(struct seq_file *file, void *priv)
int ret = 0;
char *mode;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
mode = mlx5_get_str_port_sel_mode(ldev->mode, ldev->mode_flags);
@@ -61,7 +61,7 @@ static int state_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
bool active;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
active = __mlx5_lag_is_active(ldev);
mutex_unlock(&ldev->lock);
@@ -77,7 +77,7 @@ static int flags_show(struct seq_file *file, void *priv)
bool shared_fdb;
bool lag_active;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (!lag_active)
@@ -108,7 +108,7 @@ static int mapping_show(struct seq_file *file, void *priv)
int num_ports;
int i;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
@@ -142,7 +142,7 @@ static int members_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
int i;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
for (i = 0; i < ldev->ports; i++) {
if (!ldev->pf[i].dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index ad32b80e8501..dbf218cac535 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1187,7 +1187,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
tmp_dev = mlx5_get_next_phys_dev_lag(dev);
if (tmp_dev)
- ldev = tmp_dev->priv.lag;
+ ldev = mlx5_lag_dev(tmp_dev);
if (!ldev) {
ldev = mlx5_lag_dev_alloc(dev);
@@ -1386,8 +1386,7 @@ bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_sriov(ldev) &&
- test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+ res = ldev && test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
spin_unlock_irqrestore(&lag_lock, flags);
return res;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index f30ac2de639f..66013bef9939 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -50,19 +50,6 @@ struct lag_tracker {
enum netdev_lag_hash hash_type;
};
-enum mpesw_op {
- MLX5_MPESW_OP_ENABLE,
- MLX5_MPESW_OP_DISABLE,
-};
-
-struct mlx5_mpesw_work_st {
- struct work_struct work;
- struct mlx5_lag *lag;
- enum mpesw_op op;
- struct completion comp;
- int result;
-};
-
/* LAG data of a ConnectX card.
* It serves both its phys functions.
*/
@@ -124,8 +111,6 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);
-void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);
-int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index d9fcb9ed726f..d85a8dfc153d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -28,13 +28,9 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
{
- struct mlx5_lag *ldev;
- bool res;
-
- ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_multipath(ldev);
+ struct mlx5_lag *ldev = mlx5_lag_dev(dev);
- return res;
+ return ldev && __mlx5_lag_is_multipath(ldev);
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index c17e8f1ec914..3799f89ed1a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -58,7 +58,7 @@ static void mlx5_mpesw_work(struct work_struct *work)
static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
enum mpesw_op op)
{
- struct mlx5_lag *ldev = dev->priv.lag;
+ struct mlx5_lag *ldev = mlx5_lag_dev(dev);
struct mlx5_mpesw_work_st *work;
int err = 0;
@@ -96,25 +96,27 @@ int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
}
-int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
+int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
{
- struct mlx5_lag *ldev = mdev->priv.lag;
+ struct mlx5_lag *ldev = mlx5_lag_dev(mdev);
if (!netif_is_bond_master(out_dev) || !ldev)
return 0;
- if (ldev->mode == MLX5_LAG_MODE_MPESW)
- return -EOPNOTSUPP;
+ if (ldev->mode != MLX5_LAG_MODE_MPESW)
+ return 0;
- return 0;
+ NL_SET_ERR_MSG_MOD(extack, "can't forward to bond in mpesw mode");
+ return -EOPNOTSUPP;
}
bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
{
- bool ret;
+ struct mlx5_lag *ldev = mlx5_lag_dev(dev);
- ret = dev->priv.lag && dev->priv.lag->mode == MLX5_LAG_MODE_MPESW;
- return ret;
+ return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
}
void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
index 88e8daffcf92..818f19b5a984 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
@@ -12,8 +12,25 @@ struct lag_mpesw {
atomic_t mpesw_rule_count;
};
-int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);
+enum mpesw_op {
+ MLX5_MPESW_OP_ENABLE,
+ MLX5_MPESW_OP_DISABLE,
+};
+
+struct mlx5_mpesw_work_st {
+ struct work_struct work;
+ struct mlx5_lag *lag;
+ enum mpesw_op op;
+ struct completion comp;
+ int result;
+};
+
+int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
+void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);
+int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev);
#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
void mlx5_lag_mpesw_init(struct mlx5_lag *ldev);
void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 75510a12ab02..4c9a40211059 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -362,7 +362,7 @@ static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
return mlx5_ptp_adjtime(ptp, delta);
}
-static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
+static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
@@ -370,7 +370,15 @@ static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
return 0;
MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
- MLX5_SET(mtutc_reg, in, freq_adjustment, freq);
+
+ if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units)) {
+ MLX5_SET(mtutc_reg, in, freq_adj_units,
+ MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm);
+ } else {
+ MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
+ }
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
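/* Unit note for the hunk above: scaled_ppm carries a 16-bit fractional
 * part, so 1 ppm == 65536. Firmware advertising mtutc_freq_adj_units
 * consumes the scaled value as-is and keeps that extra precision;
 * otherwise it is down-converted, e.g. scaled_ppm_to_ppb(65536) == 1000.
 */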
@@ -385,7 +393,8 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_adjfreq_real_time(mdev, scaled_ppm_to_ppb(scaled_ppm));
+
+ err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index e995f8378df7..3a94b8f8031e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -2,53 +2,253 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "mlx5_core.h"
-#include "lib/mlx5.h"
+#include "lib/crypto.h"
-int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
- void *key, u32 sz_bytes,
- u32 key_type, u32 *p_key_id)
-{
- u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
- u32 sz_bits = sz_bytes * BITS_PER_BYTE;
- u8 general_obj_key_size;
- u64 general_obj_types;
- void *obj, *key_p;
- int err;
+#define MLX5_CRYPTO_DEK_POOLS_NUM (MLX5_ACCEL_OBJ_TYPE_KEY_NUM - 1)
+#define type2idx(type) ((type) - 1)
- obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
- key_p = MLX5_ADDR_OF(encryption_key_obj, obj, key);
+#define MLX5_CRYPTO_DEK_POOL_SYNC_THRESH 128
- general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
- if (!(general_obj_types &
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY))
- return -EINVAL;
+/* Calculate the number of DEKs that were freed by any user
+ * (for example, TLS) since the last revalidation of a pool or a bulk.
+ */
+#define MLX5_CRYPTO_DEK_CALC_FREED(a) \
+ ({ typeof(a) _a = (a); \
+ _a->num_deks - _a->avail_deks - _a->in_use_deks; })
+
+#define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)
+#define MLX5_CRYPTO_DEK_BULK_CALC_FREED(bulk) MLX5_CRYPTO_DEK_CALC_FREED(bulk)
+
+#define MLX5_CRYPTO_DEK_BULK_IDLE(bulk) \
+ ({ typeof(bulk) _bulk = (bulk); \
+ _bulk->avail_deks == _bulk->num_deks; })
+
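/* Worked example for the accounting above: a pool with num_deks = 256,
 * avail_deks = 64 and in_use_deks = 64 has 256 - 64 - 64 = 128 freed
 * DEKs awaiting revalidation; once that count exceeds
 * MLX5_CRYPTO_DEK_POOL_SYNC_THRESH (128), a sync cycle is scheduled.
 */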
+enum {
+ MLX5_CRYPTO_DEK_ALL_TYPE = BIT(0),
+};
+
+struct mlx5_crypto_dek_pool {
+ struct mlx5_core_dev *mdev;
+ u32 key_purpose;
+ int num_deks; /* the total number of keys in this pool */
+ int avail_deks; /* the number of available keys in this pool */
+ int in_use_deks; /* the number of keys currently in use in this pool */
+ struct mutex lock; /* protect the following lists, and the bulks */
+ struct list_head partial_list; /* some of keys are available */
+ struct list_head full_list; /* no available keys */
+ struct list_head avail_list; /* all keys are available to use */
+
+ /* No in-use keys, and all need to be synced.
+ * These bulks will be moved to the avail list after sync.
+ */
+ struct list_head sync_list;
+
+ bool syncing;
+ struct list_head wait_for_free;
+ struct work_struct sync_work;
+
+ spinlock_t destroy_lock; /* protect destroy_list */
+ struct list_head destroy_list;
+ struct work_struct destroy_work;
+};
+
+struct mlx5_crypto_dek_bulk {
+ struct mlx5_core_dev *mdev;
+ int base_obj_id;
+ int avail_start; /* the bit to start search */
+ int num_deks; /* the total number of keys in a bulk */
+ int avail_deks; /* the number of keys available, with need_sync bit 0 */
+ int in_use_deks; /* the number of keys being used, with in_use bit 1 */
+ struct list_head entry;
+
+ /* 0: not being used by any user, 1: otherwise */
+ unsigned long *in_use;
+
+ /* The bits are set when the keys are taken into use and reset after
+ * crypto_sync is executed. So the value 0 means the key is newly
+ * created or unused since the last sync, and 1 means it is in use,
+ * or freed but not yet synced.
+ */
+ unsigned long *need_sync;
+};
+
+struct mlx5_crypto_dek_priv {
+ struct mlx5_core_dev *mdev;
+ int log_dek_obj_range;
+};
+
+struct mlx5_crypto_dek {
+ struct mlx5_crypto_dek_bulk *bulk;
+ struct list_head entry;
+ u32 obj_id;
+};
+
+u32 mlx5_crypto_dek_get_id(struct mlx5_crypto_dek *dek)
+{
+ return dek->obj_id;
+}
+
+static int mlx5_crypto_dek_get_key_sz(struct mlx5_core_dev *mdev,
+ u32 sz_bytes, u8 *key_sz_p)
+{
+ u32 sz_bits = sz_bytes * BITS_PER_BYTE;
switch (sz_bits) {
case 128:
- general_obj_key_size =
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
- key_p += sz_bytes;
+ *key_sz_p = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
break;
case 256:
- general_obj_key_size =
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256;
+ *key_sz_p = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256;
break;
default:
+ mlx5_core_err(mdev, "Crypto offload error, invalid key size (%u bits)\n",
+ sz_bits);
return -EINVAL;
}
- memcpy(key_p, key, sz_bytes);
+ return 0;
+}
+
+static int mlx5_crypto_dek_fill_key(struct mlx5_core_dev *mdev, u8 *key_obj,
+ const void *key, u32 sz_bytes)
+{
+ void *dst;
+ u8 key_sz;
+ int err;
+
+ err = mlx5_crypto_dek_get_key_sz(mdev, sz_bytes, &key_sz);
+ if (err)
+ return err;
+
+ MLX5_SET(encryption_key_obj, key_obj, key_size, key_sz);
+
+ if (sz_bytes == 16)
+ /* For key size of 128b the MSBs are reserved. */
+ dst = MLX5_ADDR_OF(encryption_key_obj, key_obj, key[1]);
+ else
+ dst = MLX5_ADDR_OF(encryption_key_obj, key_obj, key);
+
+ memcpy(dst, key, sz_bytes);
+
+ return 0;
+}
+
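/* Layout note for the helper above: the key[] field is sized for a
 * 256-bit key, so a 128-bit key lands in its upper half via key[1],
 * matching the old 'key_p += sz_bytes' offset, while the reserved MSBs
 * stay zero.
 */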
+static int mlx5_crypto_cmd_sync_crypto(struct mlx5_core_dev *mdev,
+ int crypto_type)
+{
+ u32 in[MLX5_ST_SZ_DW(sync_crypto_in)] = {};
+ int err;
+
+ mlx5_core_dbg(mdev,
+ "Execute SYNC_CRYPTO command with crypto_type(0x%x)\n",
+ crypto_type);
+
+ MLX5_SET(sync_crypto_in, in, opcode, MLX5_CMD_OP_SYNC_CRYPTO);
+ MLX5_SET(sync_crypto_in, in, crypto_type, crypto_type);
+
+ err = mlx5_cmd_exec_in(mdev, sync_crypto, in);
+ if (err)
+ mlx5_core_err(mdev,
+ "Failed to exec sync crypto, type=%d, err=%d\n",
+ crypto_type, err);
+
+ return err;
+}
+
+static int mlx5_crypto_create_dek_bulk(struct mlx5_core_dev *mdev,
+ u32 key_purpose, int log_obj_range,
+ u32 *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *obj, *param;
+ int err;
- MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
- MLX5_SET(encryption_key_obj, obj, key_type, key_type);
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_create_param, param, log_obj_range, log_obj_range);
+
+ obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
+ MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ mlx5_core_dbg(mdev, "DEK objects created, bulk=%d, obj_id=%d\n",
+ 1 << log_obj_range, *obj_id);
+
+ return 0;
+}
+
+static int mlx5_crypto_modify_dek_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes, u32 key_purpose,
+ u32 obj_id, u32 obj_offset)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *obj, *param;
+ int err;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_query_param, param, obj_offset, obj_offset);
+
+ obj = MLX5_ADDR_OF(modify_encryption_key_in, in, encryption_key_object);
+ MLX5_SET64(encryption_key_obj, obj, modify_field_select, 1);
+ MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
+ MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
+
+ err = mlx5_crypto_dek_fill_key(mdev, obj, key, sz_bytes);
+ if (err)
+ return err;
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+
+ /* avoid leaking key on the stack */
+ memzero_explicit(in, sizeof(in));
+
+ return err;
+}
+
+static int mlx5_crypto_create_dek_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes,
+ u32 key_purpose, u32 *p_key_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u64 general_obj_types;
+ void *obj;
+ int err;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY))
+ return -EINVAL;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+
+ obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
+ MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
+ MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
+
+ err = mlx5_crypto_dek_fill_key(mdev, obj, key, sz_bytes);
+ if (err)
+ return err;
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*p_key_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
@@ -58,7 +258,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
return err;
}
-void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
+static void mlx5_crypto_destroy_dek_key(struct mlx5_core_dev *mdev, u32 key_id)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
@@ -71,3 +271,504 @@ void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
+
+int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes,
+ u32 key_type, u32 *p_key_id)
+{
+ return mlx5_crypto_create_dek_key(mdev, key, sz_bytes, key_type, p_key_id);
+}
+
+void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
+{
+ mlx5_crypto_destroy_dek_key(mdev, key_id);
+}
+
+static struct mlx5_crypto_dek_bulk *
+mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv;
+ struct mlx5_core_dev *mdev = pool->mdev;
+ struct mlx5_crypto_dek_bulk *bulk;
+ int num_deks, base_obj_id;
+ int err;
+
+ bulk = kzalloc(sizeof(*bulk), GFP_KERNEL);
+ if (!bulk)
+ return ERR_PTR(-ENOMEM);
+
+ num_deks = 1 << dek_priv->log_dek_obj_range;
+ bulk->need_sync = bitmap_zalloc(num_deks, GFP_KERNEL);
+ if (!bulk->need_sync) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ bulk->in_use = bitmap_zalloc(num_deks, GFP_KERNEL);
+ if (!bulk->in_use) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose,
+ dek_priv->log_dek_obj_range,
+ &base_obj_id);
+ if (err)
+ goto err_out;
+
+ bulk->base_obj_id = base_obj_id;
+ bulk->num_deks = num_deks;
+ bulk->avail_deks = num_deks;
+ bulk->mdev = mdev;
+
+ return bulk;
+
+err_out:
+ bitmap_free(bulk->in_use);
+ bitmap_free(bulk->need_sync);
+ kfree(bulk);
+ return ERR_PTR(err);
+}
+
+static struct mlx5_crypto_dek_bulk *
+mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_bulk *bulk;
+
+ bulk = mlx5_crypto_dek_bulk_create(pool);
+ if (IS_ERR(bulk))
+ return bulk;
+
+ pool->avail_deks += bulk->num_deks;
+ pool->num_deks += bulk->num_deks;
+ list_add(&bulk->entry, &pool->partial_list);
+
+ return bulk;
+}
+
+static void mlx5_crypto_dek_bulk_free(struct mlx5_crypto_dek_bulk *bulk)
+{
+ mlx5_crypto_destroy_dek_key(bulk->mdev, bulk->base_obj_id);
+ bitmap_free(bulk->need_sync);
+ bitmap_free(bulk->in_use);
+ kfree(bulk);
+}
+
+static void mlx5_crypto_dek_pool_remove_bulk(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek_bulk *bulk,
+ bool delay)
+{
+ pool->num_deks -= bulk->num_deks;
+ pool->avail_deks -= bulk->avail_deks;
+ pool->in_use_deks -= bulk->in_use_deks;
+ list_del(&bulk->entry);
+ if (!delay)
+ mlx5_crypto_dek_bulk_free(bulk);
+}
+
+static struct mlx5_crypto_dek_bulk *
+mlx5_crypto_dek_pool_pop(struct mlx5_crypto_dek_pool *pool, u32 *obj_offset)
+{
+ struct mlx5_crypto_dek_bulk *bulk;
+ int pos;
+
+ mutex_lock(&pool->lock);
+ bulk = list_first_entry_or_null(&pool->partial_list,
+ struct mlx5_crypto_dek_bulk, entry);
+
+ if (bulk) {
+ pos = find_next_zero_bit(bulk->need_sync, bulk->num_deks,
+ bulk->avail_start);
+ if (pos == bulk->num_deks) {
+ mlx5_core_err(pool->mdev, "Wrong DEK bulk avail_start.\n");
+ pos = find_first_zero_bit(bulk->need_sync, bulk->num_deks);
+ }
+ WARN_ON(pos == bulk->num_deks);
+ } else {
+ bulk = list_first_entry_or_null(&pool->avail_list,
+ struct mlx5_crypto_dek_bulk,
+ entry);
+ if (bulk) {
+ list_move(&bulk->entry, &pool->partial_list);
+ } else {
+ bulk = mlx5_crypto_dek_pool_add_bulk(pool);
+ if (IS_ERR(bulk))
+ goto out;
+ }
+ pos = 0;
+ }
+
+ *obj_offset = pos;
+ bitmap_set(bulk->need_sync, pos, 1);
+ bitmap_set(bulk->in_use, pos, 1);
+ bulk->in_use_deks++;
+ bulk->avail_deks--;
+ if (!bulk->avail_deks) {
+ list_move(&bulk->entry, &pool->full_list);
+ bulk->avail_start = bulk->num_deks;
+ } else {
+ bulk->avail_start = pos + 1;
+ }
+ pool->avail_deks--;
+ pool->in_use_deks++;
+
+out:
+ mutex_unlock(&pool->lock);
+ return bulk;
+}
+
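/* Allocation walk of the pop above, in short: the first zero bit in
 * need_sync (searched from avail_start) names a free slot; its HW
 * object id is bulk->base_obj_id + pos, and both need_sync and in_use
 * are set so the key is accounted as the (1,1) "in use" state until it
 * is freed and later synced.
 */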
+static bool mlx5_crypto_dek_need_sync(struct mlx5_crypto_dek_pool *pool)
+{
+ return !pool->syncing &&
+ MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) > MLX5_CRYPTO_DEK_POOL_SYNC_THRESH;
+}
+
+static int mlx5_crypto_dek_free_locked(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek *dek)
+{
+ struct mlx5_crypto_dek_bulk *bulk = dek->bulk;
+ int obj_offset;
+ bool old_val;
+ int err = 0;
+
+ obj_offset = dek->obj_id - bulk->base_obj_id;
+ old_val = test_and_clear_bit(obj_offset, bulk->in_use);
+ WARN_ON_ONCE(!old_val);
+ if (!old_val) {
+ err = -ENOENT;
+ goto out_free;
+ }
+ pool->in_use_deks--;
+ bulk->in_use_deks--;
+ if (!bulk->avail_deks && !bulk->in_use_deks)
+ list_move(&bulk->entry, &pool->sync_list);
+
+ if (mlx5_crypto_dek_need_sync(pool) && schedule_work(&pool->sync_work))
+ pool->syncing = true;
+
+out_free:
+ kfree(dek);
+ return err;
+}
+
+static int mlx5_crypto_dek_pool_push(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek *dek)
+{
+ int err = 0;
+
+ mutex_lock(&pool->lock);
+ if (pool->syncing)
+ list_add(&dek->entry, &pool->wait_for_free);
+ else
+ err = mlx5_crypto_dek_free_locked(pool, dek);
+ mutex_unlock(&pool->lock);
+
+ return err;
+}
+
+/* Update the bits of a bulk during sync, and its avail_start for the
+ * next search. The possible (need_sync, in_use) combinations of one
+ * DEK are:
+ * - (0,0): the key is ready for use,
+ * - (1,1): the key is currently being used by a user,
+ * - (1,0): the key is freed and waiting to be synced,
+ * - (0,1): invalid state.
+ * So the number of revalidated DEKs is hweight_long(need_sync XOR
+ * in_use), and the need_sync bits can be reset by simply copying the
+ * in_use bits over them.
+ */
+static void mlx5_crypto_dek_bulk_reset_synced(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek_bulk *bulk)
+{
+ unsigned long *need_sync = bulk->need_sync;
+ unsigned long *in_use = bulk->in_use;
+ int i, freed, reused, avail_next;
+ bool first = true;
+
+ freed = MLX5_CRYPTO_DEK_BULK_CALC_FREED(bulk);
+
+ for (i = 0; freed && i < BITS_TO_LONGS(bulk->num_deks);
+ i++, need_sync++, in_use++) {
+ reused = hweight_long((*need_sync) ^ (*in_use));
+ if (!reused)
+ continue;
+
+ bulk->avail_deks += reused;
+ pool->avail_deks += reused;
+ *need_sync = *in_use;
+ if (first) {
+ avail_next = i * BITS_PER_TYPE(long);
+ if (bulk->avail_start > avail_next)
+ bulk->avail_start = avail_next;
+ first = false;
+ }
+
+ freed -= reused;
+ }
+}
+
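A minimal userspace sketch of the bitmap arithmetic implemented above (the values, the main() wrapper, and the GCC/Clang __builtin_popcountl() stand-in for hweight_long() are illustrative assumptions, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		/* bit i in need_sync: DEK i was handed out since the last sync */
		unsigned long need_sync = 0xfUL;	/* DEKs 0-3 handed out */
		/* bit i in in_use: DEK i is still held by a user */
		unsigned long in_use = 0x5UL;		/* DEKs 0 and 2 still held */

		/* XOR isolates the (1,0) freed-but-unsynced slots: DEKs 1 and 3 */
		printf("freed: %d\n", __builtin_popcountl(need_sync ^ in_use));

		/* after a successful sync, copying in_use into need_sync
		 * revalidates exactly those freed slots
		 */
		need_sync = in_use;
		printf("need_sync after sync: %#lx\n", need_sync);
		return 0;
	}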
+/* Return true if the bulk is reused, false if destroyed with delay */
+static bool mlx5_crypto_dek_bulk_handle_avail(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek_bulk *bulk,
+ struct list_head *destroy_list)
+{
+ if (list_empty(&pool->avail_list)) {
+ list_move(&bulk->entry, &pool->avail_list);
+ return true;
+ }
+
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, true);
+ list_add(&bulk->entry, destroy_list);
+ return false;
+}
+
+static void mlx5_crypto_dek_pool_splice_destroy_list(struct mlx5_crypto_dek_pool *pool,
+ struct list_head *list,
+ struct list_head *head)
+{
+ spin_lock(&pool->destroy_lock);
+ list_splice_init(list, head);
+ spin_unlock(&pool->destroy_lock);
+}
+
+static void mlx5_crypto_dek_pool_free_wait_keys(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek *dek, *next;
+
+ list_for_each_entry_safe(dek, next, &pool->wait_for_free, entry) {
+ list_del(&dek->entry);
+ mlx5_crypto_dek_free_locked(pool, dek);
+ }
+}
+
+/* For all the bulks in each list, reset the bits during sync.
+ * Move them to different lists according to the number of available DEKs.
+ * Destroy all the idle bulks, except one kept for quick service.
+ * Finally, free the DEKs in the waiting list at the end of this function.
+ */
+static void mlx5_crypto_dek_pool_reset_synced(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_bulk *bulk, *tmp;
+ LIST_HEAD(destroy_list);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->partial_list, entry) {
+ mlx5_crypto_dek_bulk_reset_synced(pool, bulk);
+ if (MLX5_CRYPTO_DEK_BULK_IDLE(bulk))
+ mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
+ }
+
+ list_for_each_entry_safe(bulk, tmp, &pool->full_list, entry) {
+ mlx5_crypto_dek_bulk_reset_synced(pool, bulk);
+
+ if (!bulk->avail_deks)
+ continue;
+
+ if (MLX5_CRYPTO_DEK_BULK_IDLE(bulk))
+ mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
+ else
+ list_move(&bulk->entry, &pool->partial_list);
+ }
+
+ list_for_each_entry_safe(bulk, tmp, &pool->sync_list, entry) {
+ bulk->avail_deks = bulk->num_deks;
+ pool->avail_deks += bulk->num_deks;
+ if (mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list)) {
+ memset(bulk->need_sync, 0, BITS_TO_BYTES(bulk->num_deks));
+ bulk->avail_start = 0;
+ }
+ }
+
+ mlx5_crypto_dek_pool_free_wait_keys(pool);
+
+ if (!list_empty(&destroy_list)) {
+ mlx5_crypto_dek_pool_splice_destroy_list(pool, &destroy_list,
+ &pool->destroy_list);
+ schedule_work(&pool->destroy_work);
+ }
+}
+
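A rough sketch of the bulk lifecycle handled by the three loops above (inferred from this patch and simplified; idle bulks may be destroyed rather than parked):

	avail_list --(first pop)--> partial_list --(last DEK popped)--> full_list
	     ^                          |   ^                               |
	     |      (sync: fully idle)  |   |   (sync: some DEKs freed)     |
	     +--------------------------+   +-------------------------------+
	sync_list: bulks whose DEKs have all been freed; after a successful
	sync they are either parked back on avail_list (at most one is kept
	for quick service) or queued on destroy_list for deferred freeing.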
+static void mlx5_crypto_dek_sync_work_fn(struct work_struct *work)
+{
+ struct mlx5_crypto_dek_pool *pool =
+ container_of(work, struct mlx5_crypto_dek_pool, sync_work);
+ int err;
+
+ err = mlx5_crypto_cmd_sync_crypto(pool->mdev, BIT(pool->key_purpose));
+ mutex_lock(&pool->lock);
+ if (!err)
+ mlx5_crypto_dek_pool_reset_synced(pool);
+ pool->syncing = false;
+ mutex_unlock(&pool->lock);
+}
+
+struct mlx5_crypto_dek *mlx5_crypto_dek_create(struct mlx5_crypto_dek_pool *dek_pool,
+ const void *key, u32 sz_bytes)
+{
+ struct mlx5_crypto_dek_priv *dek_priv = dek_pool->mdev->mlx5e_res.dek_priv;
+ struct mlx5_core_dev *mdev = dek_pool->mdev;
+ u32 key_purpose = dek_pool->key_purpose;
+ struct mlx5_crypto_dek_bulk *bulk;
+ struct mlx5_crypto_dek *dek;
+ int obj_offset;
+ int err;
+
+ dek = kzalloc(sizeof(*dek), GFP_KERNEL);
+ if (!dek)
+ return ERR_PTR(-ENOMEM);
+
+ if (!dek_priv) {
+ err = mlx5_crypto_create_dek_key(mdev, key, sz_bytes,
+ key_purpose, &dek->obj_id);
+ goto out;
+ }
+
+ bulk = mlx5_crypto_dek_pool_pop(dek_pool, &obj_offset);
+ if (IS_ERR(bulk)) {
+ err = PTR_ERR(bulk);
+ goto out;
+ }
+
+ dek->bulk = bulk;
+ dek->obj_id = bulk->base_obj_id + obj_offset;
+ err = mlx5_crypto_modify_dek_key(mdev, key, sz_bytes, key_purpose,
+ bulk->base_obj_id, obj_offset);
+ if (err) {
+ mlx5_crypto_dek_pool_push(dek_pool, dek);
+ return ERR_PTR(err);
+ }
+
+out:
+ if (err) {
+ kfree(dek);
+ return ERR_PTR(err);
+ }
+
+ return dek;
+}
+
+void mlx5_crypto_dek_destroy(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek)
+{
+ struct mlx5_crypto_dek_priv *dek_priv = dek_pool->mdev->mlx5e_res.dek_priv;
+ struct mlx5_core_dev *mdev = dek_pool->mdev;
+
+ if (!dek_priv) {
+ mlx5_crypto_destroy_dek_key(mdev, dek->obj_id);
+ kfree(dek);
+ } else {
+ mlx5_crypto_dek_pool_push(dek_pool, dek);
+ }
+}
+
+static void mlx5_crypto_dek_free_destroy_list(struct list_head *destroy_list)
+{
+ struct mlx5_crypto_dek_bulk *bulk, *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, destroy_list, entry)
+ mlx5_crypto_dek_bulk_free(bulk);
+}
+
+static void mlx5_crypto_dek_destroy_work_fn(struct work_struct *work)
+{
+ struct mlx5_crypto_dek_pool *pool =
+ container_of(work, struct mlx5_crypto_dek_pool, destroy_work);
+ LIST_HEAD(destroy_list);
+
+ mlx5_crypto_dek_pool_splice_destroy_list(pool, &pool->destroy_list,
+ &destroy_list);
+ mlx5_crypto_dek_free_destroy_list(&destroy_list);
+}
+
+struct mlx5_crypto_dek_pool *
+mlx5_crypto_dek_pool_create(struct mlx5_core_dev *mdev, int key_purpose)
+{
+ struct mlx5_crypto_dek_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->mdev = mdev;
+ pool->key_purpose = key_purpose;
+
+ mutex_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->avail_list);
+ INIT_LIST_HEAD(&pool->partial_list);
+ INIT_LIST_HEAD(&pool->full_list);
+ INIT_LIST_HEAD(&pool->sync_list);
+ INIT_LIST_HEAD(&pool->wait_for_free);
+ INIT_WORK(&pool->sync_work, mlx5_crypto_dek_sync_work_fn);
+ spin_lock_init(&pool->destroy_lock);
+ INIT_LIST_HEAD(&pool->destroy_list);
+ INIT_WORK(&pool->destroy_work, mlx5_crypto_dek_destroy_work_fn);
+
+ return pool;
+}
+
+void mlx5_crypto_dek_pool_destroy(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_bulk *bulk, *tmp;
+
+ cancel_work_sync(&pool->sync_work);
+ cancel_work_sync(&pool->destroy_work);
+
+ mlx5_crypto_dek_pool_free_wait_keys(pool);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->avail_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->full_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->sync_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->partial_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ mlx5_crypto_dek_free_destroy_list(&pool->destroy_list);
+
+ mutex_destroy(&pool->lock);
+
+ kfree(pool);
+}
+
+void mlx5_crypto_dek_cleanup(struct mlx5_crypto_dek_priv *dek_priv)
+{
+ if (!dek_priv)
+ return;
+
+ kfree(dek_priv);
+}
+
+struct mlx5_crypto_dek_priv *mlx5_crypto_dek_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_crypto_dek_priv *dek_priv;
+ int err;
+
+ if (!MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc))
+ return NULL;
+
+ dek_priv = kzalloc(sizeof(*dek_priv), GFP_KERNEL);
+ if (!dek_priv)
+ return ERR_PTR(-ENOMEM);
+
+ dek_priv->mdev = mdev;
+ dek_priv->log_dek_obj_range = min_t(int, 12,
+ MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc));
+
+ /* sync all types of objects */
+ err = mlx5_crypto_cmd_sync_crypto(mdev, MLX5_CRYPTO_DEK_ALL_TYPE);
+ if (err)
+ goto err_sync_crypto;
+
+ mlx5_core_dbg(mdev, "Crypto DEK enabled, %d deks per alloc (max %d), total %d\n",
+ 1 << dek_priv->log_dek_obj_range,
+ 1 << MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc),
+ 1 << MLX5_CAP_CRYPTO(mdev, log_max_num_deks));
+
+ return dek_priv;
+
+err_sync_crypto:
+ kfree(dek_priv);
+ return ERR_PTR(err);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
new file mode 100644
index 000000000000..c819c047bb9c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_CRYPTO_H__
+#define __MLX5_LIB_CRYPTO_H__
+
+enum {
+ MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS,
+ MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC,
+ MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC,
+ MLX5_ACCEL_OBJ_TYPE_KEY_NUM,
+};
+
+int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes,
+ u32 key_type, u32 *p_key_id);
+
+void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+
+struct mlx5_crypto_dek_pool;
+struct mlx5_crypto_dek;
+
+struct mlx5_crypto_dek_pool *mlx5_crypto_dek_pool_create(struct mlx5_core_dev *mdev,
+ int key_purpose);
+void mlx5_crypto_dek_pool_destroy(struct mlx5_crypto_dek_pool *pool);
+struct mlx5_crypto_dek *mlx5_crypto_dek_create(struct mlx5_crypto_dek_pool *dek_pool,
+ const void *key, u32 sz_bytes);
+void mlx5_crypto_dek_destroy(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek);
+u32 mlx5_crypto_dek_get_id(struct mlx5_crypto_dek *dek);
+
+struct mlx5_crypto_dek_priv *mlx5_crypto_dek_init(struct mlx5_core_dev *mdev);
+void mlx5_crypto_dek_cleanup(struct mlx5_crypto_dek_priv *dek_priv);
+#endif /* __MLX5_LIB_CRYPTO_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 032adb21ad4b..ccf12f7db6f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -79,28 +79,11 @@ struct mlx5_pme_stats {
void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
-/* Crypto */
-enum {
- MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
- MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
- MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC,
-};
-
-int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
- void *key, u32 sz_bytes,
- u32 key_type, u32 *p_key_id);
-void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
-
static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
{
return devlink_net(priv_to_devlink(dev));
}
-static inline void mlx5_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev)
-{
- mdev->mlx5e_res.uplink_netdev = netdev;
-}
-
static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
{
return mdev->mlx5e_res.uplink_netdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8823f20d2122..26e1057845fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -336,6 +336,24 @@ static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
}
}
+void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *dev, struct net_device *netdev)
+{
+ mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
+ dev->mlx5e_res.uplink_netdev = netdev;
+ mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ netdev);
+ mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
+}
+
+void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev)
+{
+ mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
+ mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ dev->mlx5e_res.uplink_netdev);
+ mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
+}
+EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay);
+
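A hypothetical consumer of the new MLX5_DRIVER_EVENT_UPLINK_NETDEV event (the handler name is invented; registration through the existing mlx5_blocking_notifier_register() is assumed):

	static int uplink_netdev_event(struct notifier_block *nb,
				       unsigned long event, void *data)
	{
		if (event != MLX5_DRIVER_EVENT_UPLINK_NETDEV)
			return NOTIFY_DONE;

		/* data is the new (possibly NULL) uplink netdev */
		return NOTIFY_OK;
	}

mlx5_core_uplink_netdev_event_replay() then lets such a late-registering consumer receive the current uplink netdev under the same lock, without racing mlx5_core_uplink_netdev_set().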
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
enum mlx5_cap_type cap_type,
enum mlx5_cap_mode cap_mode)
@@ -1555,6 +1573,7 @@ static const int types[] = {
MLX5_CAP_DEV_SHAMPO,
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
+ MLX5_CAP_CRYPTO,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1608,6 +1627,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
+ mutex_init(&dev->mlx5e_res.uplink_netdev_lock);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1696,6 +1716,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->mlx5e_res.uplink_netdev_lock);
mutex_destroy(&dev->intf_state_mutex);
lockdep_unregister_key(&dev->lock_key);
}
@@ -2110,7 +2131,7 @@ static int __init mlx5_init(void)
mlx5_core_verify_params();
mlx5_register_debugfs();
- err = pci_register_driver(&mlx5_core_driver);
+ err = mlx5e_init();
if (err)
goto err_debug;
@@ -2118,16 +2139,16 @@ static int __init mlx5_init(void)
if (err)
goto err_sf;
- err = mlx5e_init();
+ err = pci_register_driver(&mlx5_core_driver);
if (err)
- goto err_en;
+ goto err_pci;
return 0;
-err_en:
+err_pci:
mlx5_sf_driver_unregister();
err_sf:
- pci_unregister_driver(&mlx5_core_driver);
+ mlx5e_cleanup();
err_debug:
mlx5_unregister_debugfs();
return err;
@@ -2135,9 +2156,9 @@ err_debug:
static void __exit mlx5_cleanup(void)
{
- mlx5e_cleanup();
- mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
+ mlx5_sf_driver_unregister();
+ mlx5e_cleanup();
mlx5_unregister_debugfs();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 60596357bfc7..64d4e7125e9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -74,6 +74,14 @@ static u32 get_function(u16 func_id, bool ec_function)
return (u32)func_id | (ec_function << 16);
}
+static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
+{
+ if (!func_id)
+ return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
+
+ return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
+}
+
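The mapping implemented by the helper above, as a worked table (max_vfs == 4 is an assumed value for illustration):

	func_id   mlx5_core_is_ecpf()   ec_function   page counter type
	0         true                  false         MLX5_HOST_PF
	0         true                  true          MLX5_PF
	0         false                 -             MLX5_PF
	1..4      -                     -             MLX5_VF
	5..       -                     -             MLX5_SF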
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
@@ -211,7 +219,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
if (n >= MLX5_NUM_4K_IN_PAGE) {
- mlx5_core_warn(dev, "alloc 4k bug\n");
+ mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
+ fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
return -ENOENT;
}
clear_bit(n, &fp->bitmask);
@@ -332,6 +341,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
int notify_fail = event;
+ u16 func_type;
u64 addr;
int err;
u32 *in;
@@ -383,11 +393,9 @@ retry:
goto out_dropped;
}
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] += npages;
dev->priv.fw_pages += npages;
- if (func_id)
- dev->priv.vfs_pages += npages;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages += npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err);
@@ -414,6 +422,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
struct rb_root *root;
struct rb_node *p;
int npages = 0;
+ u16 func_type;
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
@@ -428,11 +437,9 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
free_fwp(dev, fwp, fwp->free_count);
}
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] -= npages;
dev->priv.fw_pages -= npages;
- if (func_id)
- dev->priv.vfs_pages -= npages;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages -= npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
npages, ec_function, func_id);
@@ -498,6 +505,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
+ u16 func_type;
u32 *out;
int err;
int i;
@@ -549,11 +557,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
if (nclaimed)
*nclaimed = num_claimed;
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] -= num_claimed;
dev->priv.fw_pages -= num_claimed;
- if (func_id)
- dev->priv.vfs_pages -= num_claimed;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages -= num_claimed;
out_free:
kvfree(out);
@@ -706,12 +712,12 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
- WARN(dev->priv.vfs_pages,
+ WARN(dev->priv.page_counters[MLX5_VF],
"VFs FW pages counter is %d after reclaiming all pages\n",
- dev->priv.vfs_pages);
- WARN(dev->priv.host_pf_pages,
+ dev->priv.page_counters[MLX5_VF]);
+ WARN(dev->priv.page_counters[MLX5_HOST_PF],
"External host PF FW pages counter is %d after reclaiming all pages\n",
- dev->priv.host_pf_pages);
+ dev->priv.page_counters[MLX5_HOST_PF]);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index c0e6c487c63c..3008e9ce2bbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -147,7 +147,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
- if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
+ if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index b851141e03de..042ca0349124 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -1138,12 +1138,14 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
rule->flow_source))
return 0;
+ mlx5dr_domain_nic_lock(nic_dmn);
+
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
- return ret;
+ goto err_unlock;
hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
if (likely(hw_ste_arr_is_opt)) {
@@ -1152,12 +1154,12 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
DR_STE_SIZE, GFP_KERNEL);
- if (!hw_ste_arr)
- return -ENOMEM;
+ if (!hw_ste_arr) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
}
- mlx5dr_domain_nic_lock(nic_dmn);
-
ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
if (ret)
goto free_hw_ste;
@@ -1223,7 +1225,10 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_unlock(nic_dmn);
- goto out;
+ if (unlikely(!hw_ste_arr_is_opt))
+ kfree(hw_ste_arr);
+
+ return 0;
free_rule:
dr_rule_clean_rule_members(rule, nic_rule);
@@ -1238,12 +1243,12 @@ remove_from_nic_tbl:
mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
free_hw_ste:
- mlx5dr_domain_nic_unlock(nic_dmn);
-
-out:
- if (unlikely(!hw_ste_arr_is_opt))
+ if (!hw_ste_arr_is_opt)
kfree(hw_ste_arr);
+err_unlock:
+ mlx5dr_domain_nic_unlock(nic_dmn);
+
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f8623e8388c8..22db0bb15c45 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1750,29 +1750,12 @@ static const struct devlink_ops mlxsw_devlink_ops = {
static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
{
- int err;
-
- err = mlxsw_core_fw_params_register(mlxsw_core);
- if (err)
- return err;
-
- if (mlxsw_core->driver->params_register) {
- err = mlxsw_core->driver->params_register(mlxsw_core);
- if (err)
- goto err_params_register;
- }
- return 0;
-
-err_params_register:
- mlxsw_core_fw_params_unregister(mlxsw_core);
- return err;
+ return mlxsw_core_fw_params_register(mlxsw_core);
}
static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
{
mlxsw_core_fw_params_unregister(mlxsw_core);
- if (mlxsw_core->driver->params_register)
- mlxsw_core->driver->params_unregister(mlxsw_core);
}
struct mlxsw_core_health_event {
@@ -2201,6 +2184,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_devlink_alloc;
}
devl_lock(devlink);
+ devl_register(devlink);
}
mlxsw_core = devlink_priv(devlink);
@@ -2284,11 +2268,8 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_driver_init;
}
- if (!reload) {
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ if (!reload)
devl_unlock(devlink);
- devlink_register(devlink);
- }
return 0;
err_driver_init:
@@ -2320,6 +2301,7 @@ err_register_resources:
err_bus_init:
mlxsw_core_irq_event_handler_fini(mlxsw_core);
if (!reload) {
+ devl_unregister(devlink);
devl_unlock(devlink);
devlink_free(devlink);
}
@@ -2358,10 +2340,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- if (!reload) {
- devlink_unregister(devlink);
+ if (!reload)
devl_lock(devlink);
- }
if (devlink_is_reload_failed(devlink)) {
if (!reload)
@@ -2390,6 +2370,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
mlxsw_core_irq_event_handler_fini(mlxsw_core);
if (!reload) {
+ devl_unregister(devlink);
devl_unlock(devlink);
devlink_free(devlink);
}
@@ -2399,6 +2380,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
devl_resources_unregister(devlink);
+ devl_unregister(devlink);
devl_unlock(devlink);
devlink_free(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index a77cb0be7108..e5474d3e34db 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -421,8 +421,6 @@ struct mlxsw_driver {
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
- int (*params_register)(struct mlxsw_core *mlxsw_core);
- void (*params_unregister)(struct mlxsw_core *mlxsw_core);
/* Notify a driver that a timestamped packet was transmitted. Driver
* is responsible for freeing the passed-in SKB.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index b0bdb9640ebf..a8f94b7544ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3861,62 +3861,6 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
return 0;
}
-static int
-mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-
- ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
- return 0;
-}
-
-static int
-mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-
- return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
-}
-
-static const struct devlink_param mlxsw_sp2_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
- "acl_region_rehash_interval",
- DEVLINK_PARAM_TYPE_U32,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlxsw_sp_params_acl_region_rehash_intrvl_get,
- mlxsw_sp_params_acl_region_rehash_intrvl_set,
- NULL),
-};
-
-static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
-{
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- union devlink_param_value value;
- int err;
-
- err = devl_params_register(devlink, mlxsw_sp2_devlink_params,
- ARRAY_SIZE(mlxsw_sp2_devlink_params));
- if (err)
- return err;
-
- value.vu32 = 0;
- devl_param_driverinit_value_set(devlink,
- MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
- value);
- return 0;
-}
-
-static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
-{
- devl_params_unregister(priv_to_devlink(mlxsw_core),
- mlxsw_sp2_devlink_params,
- ARRAY_SIZE(mlxsw_sp2_devlink_params));
-}
-
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port)
{
@@ -3994,8 +3938,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
- .params_register = mlxsw_sp2_params_register,
- .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
@@ -4033,8 +3975,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
- .params_register = mlxsw_sp2_params_register,
- .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
@@ -4070,8 +4010,6 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
- .params_register = mlxsw_sp2_params_register,
- .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp4_config_profile,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index bbc73324451d..4c22f8004514 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -973,6 +973,7 @@ enum mlxsw_sp_acl_profile {
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl);
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
@@ -1096,8 +1097,6 @@ mlxsw_sp_acl_act_cookie_lookup(struct mlxsw_sp *mlxsw_sp, u32 cookie_index)
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
-u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val);
struct mlxsw_sp_acl_mangle_action;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 6c5af018546f..0423ac262d89 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -40,6 +40,11 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
+struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl)
+{
+ return &acl->tcam;
+}
+
struct mlxsw_sp_acl_ruleset_ht_key {
struct mlxsw_sp_flow_block *block;
u32 chain_index;
@@ -1099,22 +1104,6 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
kfree(acl);
}
-u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-
- return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
- &acl->tcam);
-}
-
-int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
-{
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-
- return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
- &acl->tcam, val);
-}
-
struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
.act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 3b9ba8fa247a..d50786b0a6ce 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <net/devlink.h>
#include <trace/events/mlxsw.h>
#include "reg.h"
@@ -29,67 +30,6 @@ size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
-int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- u64 max_tcam_regions;
- u64 max_regions;
- u64 max_groups;
- int err;
-
- mutex_init(&tcam->lock);
- tcam->vregion_rehash_intrvl =
- MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
- INIT_LIST_HEAD(&tcam->vregion_list);
-
- max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- ACL_MAX_TCAM_REGIONS);
- max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
-
- /* Use 1:1 mapping between ACL region and TCAM region */
- if (max_tcam_regions < max_regions)
- max_regions = max_tcam_regions;
-
- tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
- if (!tcam->used_regions)
- return -ENOMEM;
- tcam->max_regions = max_regions;
-
- max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
- if (!tcam->used_groups) {
- err = -ENOMEM;
- goto err_alloc_used_groups;
- }
- tcam->max_groups = max_groups;
- tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- ACL_MAX_GROUP_SIZE);
-
- err = ops->init(mlxsw_sp, tcam->priv, tcam);
- if (err)
- goto err_tcam_init;
-
- return 0;
-
-err_tcam_init:
- bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
- bitmap_free(tcam->used_regions);
- return err;
-}
-
-void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
-
- mutex_destroy(&tcam->lock);
- ops->fini(mlxsw_sp, tcam->priv);
- bitmap_free(tcam->used_groups);
- bitmap_free(tcam->used_regions);
-}
-
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 *priority, bool fillup_priority)
@@ -893,41 +833,6 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
kfree(vregion);
}
-u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- u32 vregion_rehash_intrvl;
-
- if (WARN_ON(!ops->region_rehash_hints_get))
- return 0;
- vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
- return vregion_rehash_intrvl;
-}
-
-int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam,
- u32 val)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- struct mlxsw_sp_acl_tcam_vregion *vregion;
-
- if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
- return -EINVAL;
- if (WARN_ON(!ops->region_rehash_hints_get))
- return -EOPNOTSUPP;
- tcam->vregion_rehash_intrvl = val;
- mutex_lock(&tcam->lock);
- list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
- if (val)
- mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
- else
- cancel_delayed_work_sync(&vregion->rehash.dw);
- }
- mutex_unlock(&tcam->lock);
- return 0;
-}
-
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vgroup *vgroup,
@@ -1542,6 +1447,153 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
}
+static int
+mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp_acl_tcam *tcam;
+ struct mlxsw_sp *mlxsw_sp;
+
+ mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
+ ctx->val.vu32 = tcam->vregion_rehash_intrvl;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ struct mlxsw_sp_acl_tcam *tcam;
+ struct mlxsw_sp *mlxsw_sp;
+ u32 val = ctx->val.vu32;
+
+ if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
+ return -EINVAL;
+
+ mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
+ tcam->vregion_rehash_intrvl = val;
+ mutex_lock(&tcam->lock);
+ list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
+ if (val)
+ mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+ else
+ cancel_delayed_work_sync(&vregion->rehash.dw);
+ }
+ mutex_unlock(&tcam->lock);
+ return 0;
+}
+
+static const struct devlink_param mlxsw_sp_acl_tcam_rehash_params[] = {
+ DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
+ "acl_region_rehash_interval",
+ DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlxsw_sp_acl_tcam_region_rehash_intrvl_get,
+ mlxsw_sp_acl_tcam_region_rehash_intrvl_set,
+ NULL),
+};
+
+static int mlxsw_sp_acl_tcam_rehash_params_register(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
+ return 0;
+
+ return devl_params_register(devlink, mlxsw_sp_acl_tcam_rehash_params,
+ ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
+}
+
+static void
+mlxsw_sp_acl_tcam_rehash_params_unregister(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
+ return;
+
+ devl_params_unregister(devlink, mlxsw_sp_acl_tcam_rehash_params,
+ ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
+}
+
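For reference, the runtime parameter registered above is exercised from userspace with the devlink CLI, e.g. (the device name is illustrative):

	$ devlink dev param set pci/0000:03:00.0 \
		name acl_region_rehash_interval value 3000 cmode runtime
	$ devlink dev param show pci/0000:03:00.0 name acl_region_rehash_interval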
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ u64 max_tcam_regions;
+ u64 max_regions;
+ u64 max_groups;
+ int err;
+
+ mutex_init(&tcam->lock);
+ tcam->vregion_rehash_intrvl =
+ MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
+ INIT_LIST_HEAD(&tcam->vregion_list);
+
+ err = mlxsw_sp_acl_tcam_rehash_params_register(mlxsw_sp);
+ if (err)
+ goto err_rehash_params_register;
+
+ max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_TCAM_REGIONS);
+ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+
+ /* Use 1:1 mapping between ACL region and TCAM region */
+ if (max_tcam_regions < max_regions)
+ max_regions = max_tcam_regions;
+
+ tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
+ if (!tcam->used_regions) {
+ err = -ENOMEM;
+ goto err_alloc_used_regions;
+ }
+ tcam->max_regions = max_regions;
+
+ max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
+ tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
+ if (!tcam->used_groups) {
+ err = -ENOMEM;
+ goto err_alloc_used_groups;
+ }
+ tcam->max_groups = max_groups;
+ tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_GROUP_SIZE);
+
+ err = ops->init(mlxsw_sp, tcam->priv, tcam);
+ if (err)
+ goto err_tcam_init;
+
+ return 0;
+
+err_tcam_init:
+ bitmap_free(tcam->used_groups);
+err_alloc_used_groups:
+ bitmap_free(tcam->used_regions);
+err_alloc_used_regions:
+ mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
+err_rehash_params_register:
+ mutex_destroy(&tcam->lock);
+ return err;
+}
+
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->fini(mlxsw_sp, tcam->priv);
+ bitmap_free(tcam->used_groups);
+ bitmap_free(tcam->used_regions);
+ mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
+ mutex_destroy(&tcam->lock);
+}
+
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
MLXSW_AFK_ELEMENT_DMAC_32_47,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index edbbc89e7a71..462bf448497d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -29,11 +29,6 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam);
void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam);
-u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam);
-int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam,
- u32 val);
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 *priority, bool fillup_priority);
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 56afd694f3c7..7b0cda4ffa6b 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -15,5 +15,7 @@ lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_xdp.o lan966x_vcap_impl.o lan966x_vcap_ag_api.o \
lan966x_tc_flower.o lan966x_goto.o
+lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o
+
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 580c91d24a52..47b37ab9d7d5 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -1035,6 +1035,8 @@ static int lan966x_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lan966x);
lan966x->dev = &pdev->dev;
+ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+
if (!device_get_mac_address(&pdev->dev, mac_addr)) {
ether_addr_copy(lan966x->base_mac, mac_addr);
} else {
@@ -1223,6 +1225,8 @@ static int lan966x_remove(struct platform_device *pdev)
lan966x_fdb_deinit(lan966x);
lan966x_ptp_deinit(lan966x);
+ debugfs_remove_recursive(lan966x->debugfs_root);
+
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 0106f9487cbe..49f5159afbf3 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -3,6 +3,7 @@
#ifndef __LAN966X_MAIN_H__
#define __LAN966X_MAIN_H__
+#include <linux/debugfs.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
@@ -14,6 +15,9 @@
#include <net/pkt_sched.h>
#include <net/switchdev.h>
+#include <vcap_api.h>
+#include <vcap_api_client.h>
+
#include "lan966x_regs.h"
#include "lan966x_ifh.h"
@@ -128,6 +132,13 @@ enum LAN966X_PORT_MASK_MODE {
LAN966X_PMM_REDIRECT,
};
+enum vcap_is2_port_sel_ipv6 {
+ VCAP_IS2_PS_IPV6_TCPUDP_OTHER,
+ VCAP_IS2_PS_IPV6_STD,
+ VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER,
+ VCAP_IS2_PS_IPV6_MAC_ETYPE,
+};
+
struct lan966x_port;
struct lan966x_db {
@@ -315,6 +326,9 @@ struct lan966x {
/* vcap */
struct vcap_control *vcap_ctrl;
+
+ /* debugfs */
+ struct dentry *debugfs_root;
};
struct lan966x_port_config {
@@ -601,9 +615,22 @@ static inline bool lan966x_xdp_port_present(struct lan966x_port *port)
int lan966x_vcap_init(struct lan966x *lan966x);
void lan966x_vcap_deinit(struct lan966x *lan966x);
+#if defined(CONFIG_DEBUG_FS)
+int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out);
+#else
+static inline int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+#endif
int lan966x_tc_flower(struct lan966x_port *port,
- struct flow_cls_offload *f);
+ struct flow_cls_offload *f,
+ bool ingress);
int lan966x_goto_port_add(struct lan966x_port *port,
int from_cid, int to_cid,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
index 01072121c999..cf0cc7562d04 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include "lan966x_main.h"
@@ -70,7 +71,7 @@ static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSMATCHALL:
return lan966x_tc_matchall(port, type_data, ingress);
case TC_SETUP_CLSFLOWER:
- return lan966x_tc_flower(port, type_data);
+ return lan966x_tc_flower(port, type_data, ingress);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
index b66a8725a071..bd10a7189741 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -3,53 +3,135 @@
#include "lan966x_main.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#include "vcap_tc.h"
-struct lan966x_tc_flower_parse_usage {
- struct flow_cls_offload *f;
- struct flow_rule *frule;
- struct vcap_rule *vrule;
- unsigned int used_keys;
- u16 l3_proto;
-};
+static bool lan966x_tc_is_known_etype(u16 etype)
+{
+ switch (etype) {
+ case ETH_P_ALL:
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ return true;
+ }
+
+ return false;
+}
-static int lan966x_tc_flower_handler_ethaddr_usage(struct lan966x_tc_flower_parse_usage *st)
+static int
+lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
- enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
- enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
- struct flow_match_eth_addrs match;
- struct vcap_u48_key smac, dmac;
+ struct flow_match_control match;
int err = 0;
- flow_rule_match_eth_addrs(st->frule, &match);
-
- if (!is_zero_ether_addr(match.mask->src)) {
- vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
- vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
+ flow_rule_match_control(st->frule, &match);
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_BIT_1);
+ else
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_BIT_0);
if (err)
goto out;
}
- if (!is_zero_ether_addr(match.mask->dst)) {
- vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
- vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (match.key->flags & FLOW_DIS_FIRST_FRAG)
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_BIT_0);
+ else
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_BIT_1);
if (err)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
return err;
out:
- NL_SET_ERR_MSG_MOD(st->f->common.extack, "eth_addr parse error");
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
+ return err;
+}
+
+static int
+lan966x_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_basic match;
+ int err = 0;
+
+ flow_rule_match_basic(st->frule, &match);
+ if (match.mask->n_proto) {
+ st->l3_proto = be16_to_cpu(match.key->n_proto);
+ if (!lan966x_tc_is_known_etype(st->l3_proto)) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IP) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+ }
+ if (match.mask->ip_proto) {
+ st->l4_proto = match.key->ip_proto;
+
+ if (st->l4_proto == IPPROTO_TCP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l4_proto == IPPROTO_UDP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ } else {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP_PROTO,
+ st->l4_proto, ~0);
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
return err;
}
static int
-(*lan966x_tc_flower_handlers_usage[])(struct lan966x_tc_flower_parse_usage *st) = {
- [FLOW_DISSECTOR_KEY_ETH_ADDRS] = lan966x_tc_flower_handler_ethaddr_usage,
+lan966x_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ return vcap_tc_flower_handler_vlan_usage(st,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_8021Q_PCP_CLS);
+}
+
+static int
+(*lan966x_tc_flower_handlers_usage[])(struct vcap_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
+ [FLOW_DISSECTOR_KEY_CONTROL] = lan966x_tc_flower_handler_control_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_BASIC] = lan966x_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_VLAN] = lan966x_tc_flower_handler_vlan_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
};
static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
@@ -57,8 +139,8 @@ static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
struct vcap_rule *vrule,
u16 *l3_proto)
{
- struct lan966x_tc_flower_parse_usage state = {
- .f = f,
+ struct vcap_tc_flower_parse_usage state = {
+ .fco = f,
.vrule = vrule,
.l3_proto = ETH_P_ALL,
};
@@ -83,7 +165,8 @@ static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
struct net_device *dev,
- struct flow_cls_offload *fco)
+ struct flow_cls_offload *fco,
+ bool ingress)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
struct flow_action_entry *actent, *last_actent = NULL;
@@ -120,7 +203,8 @@ static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
"Invalid goto chain");
return -EINVAL;
}
- } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index)) {
+ } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
+ ingress)) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Last action must be 'goto'");
return -EINVAL;
@@ -139,7 +223,8 @@ static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
static int lan966x_tc_flower_add(struct lan966x_port *port,
struct flow_cls_offload *f,
- struct vcap_admin *admin)
+ struct vcap_admin *admin,
+ bool ingress)
{
struct flow_action_entry *act;
u16 l3_proto = ETH_P_ALL;
@@ -148,7 +233,7 @@ static int lan966x_tc_flower_add(struct lan966x_port *port,
int err, idx;
err = lan966x_tc_flower_action_check(port->lan966x->vcap_ctrl,
- port->dev, f);
+ port->dev, f, ingress);
if (err)
return err;
@@ -231,8 +316,27 @@ static int lan966x_tc_flower_del(struct lan966x_port *port,
return err;
}
+static int lan966x_tc_flower_stats(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin)
+{
+ struct vcap_counter count = {};
+ int err;
+
+ err = vcap_get_rule_count_by_cookie(port->lan966x->vcap_ctrl,
+ &count, f->cookie);
+ if (err)
+ return err;
+
+ flow_stats_update(&f->stats, 0x0, count.value, 0, 0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ return err;
+}
+
int lan966x_tc_flower(struct lan966x_port *port,
- struct flow_cls_offload *f)
+ struct flow_cls_offload *f,
+ bool ingress)
{
struct vcap_admin *admin;
@@ -245,9 +349,11 @@ int lan966x_tc_flower(struct lan966x_port *port,
switch (f->command) {
case FLOW_CLS_REPLACE:
- return lan966x_tc_flower_add(port, f, admin);
+ return lan966x_tc_flower_add(port, f, admin, ingress);
case FLOW_CLS_DESTROY:
return lan966x_tc_flower_del(port, f, admin);
+ case FLOW_CLS_STATS:
+ return lan966x_tc_flower_stats(port, f, admin);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
new file mode 100644
index 000000000000..7a0db58f5513
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "lan966x_vcap_ag_api.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+static void lan966x_vcap_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ out->prf(out->dst, " port[%d] (%s): ", port->chip_port,
+ netdev_name(port->dev));
+
+ val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_VCAP_S2_CFG_ENA_GET(val))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ for (int l = 0; l < admin->lookups; ++l) {
+ out->prf(out->dst, "\n Lookup %d: ", l);
+
+ out->prf(out->dst, "\n snap: ");
+ if (ANA_VCAP_S2_CFG_SNAP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_llc");
+ else
+ out->prf(out->dst, "mac_snap");
+
+ out->prf(out->dst, "\n oam: ");
+ if (ANA_VCAP_S2_CFG_OAM_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "mac_oam");
+
+ out->prf(out->dst, "\n arp: ");
+ if (ANA_VCAP_S2_CFG_ARP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "mac_arp");
+
+ out->prf(out->dst, "\n ipv4_other: ");
+ if (ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "ip4_other");
+
+ out->prf(out->dst, "\n ipv4_tcp_udp: ");
+ if (ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "ipv4_tcp_udp");
+
+ out->prf(out->dst, "\n ipv6: ");
+ switch (ANA_VCAP_S2_CFG_IP6_CFG_GET(val) & (0x3 << l)) {
+ case VCAP_IS2_PS_IPV6_TCPUDP_OTHER:
+ out->prf(out->dst, "ipv6_tcp_udp ipv6_tcp_udp");
+ break;
+ case VCAP_IS2_PS_IPV6_STD:
+ out->prf(out->dst, "ipv6_std");
+ break;
+ case VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER:
+ out->prf(out->dst, "ipv4_tcp_udp ipv4_tcp_udp");
+ break;
+ case VCAP_IS2_PS_IPV6_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ }
+ }
+
+ out->prf(out->dst, "\n");
+}
+
+int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ const struct vcap_info *vcap;
+ struct vcap_control *vctrl;
+
+ vctrl = lan966x->vcap_ctrl;
+ vcap = &vctrl->vcaps[admin->vtype];
+
+ out->prf(out->dst, "%s:\n", vcap->name);
+ lan966x_vcap_port_keys(port, admin, out);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index 76a9fb113f50..68f9d69fd37b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -4,18 +4,12 @@
#include "lan966x_vcap_ag_api.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#include "vcap_api_debugfs.h"
#define STREAMSIZE (64 * 4)
#define LAN966X_IS2_LOOKUPS 2
-enum vcap_is2_port_sel_ipv6 {
- VCAP_IS2_PS_IPV6_TCPUDP_OTHER,
- VCAP_IS2_PS_IPV6_STD,
- VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER,
- VCAP_IS2_PS_IPV6_MAC_ETYPE,
-};
-
static struct lan966x_vcap_inst {
enum vcap_type vtype; /* type of vcap */
int tgt_inst; /* hardware instance number */
@@ -23,6 +17,7 @@ static struct lan966x_vcap_inst {
int first_cid; /* first chain id in this vcap */
int last_cid; /* last chain id in this vcap */
int count; /* number of available addresses */
+ bool ingress; /* is vcap in the ingress path */
} lan966x_vcap_inst_cfg[] = {
{
.vtype = VCAP_TYPE_IS2, /* IS2-0 */
@@ -31,6 +26,7 @@ static struct lan966x_vcap_inst {
.first_cid = LAN966X_VCAP_CID_IS2_L0,
.last_cid = LAN966X_VCAP_CID_IS2_MAX,
.count = 256,
+ .ingress = true,
},
};
@@ -383,13 +379,6 @@ static void lan966x_vcap_move(struct net_device *dev,
lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
}
-static int lan966x_vcap_port_info(struct net_device *dev,
- struct vcap_admin *admin,
- struct vcap_output_print *out)
-{
- return 0;
-}
-
static struct vcap_operations lan966x_vcap_ops = {
.validate_keyset = lan966x_vcap_validate_keyset,
.add_default_fields = lan966x_vcap_add_default_fields,
@@ -431,6 +420,7 @@ lan966x_vcap_admin_alloc(struct lan966x *lan966x, struct vcap_control *ctrl,
admin->vtype = cfg->vtype;
admin->vinst = 0;
+ admin->ingress = cfg->ingress;
admin->w32be = true;
admin->tgt_inst = cfg->tgt_inst;
@@ -483,6 +473,7 @@ int lan966x_vcap_init(struct lan966x *lan966x)
struct lan966x_vcap_inst *cfg;
struct vcap_control *ctrl;
struct vcap_admin *admin;
+ struct dentry *dir;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -506,11 +497,17 @@ int lan966x_vcap_init(struct lan966x *lan966x)
list_add_tail(&admin->list, &ctrl->list);
}
- for (int p = 0; p < lan966x->num_phys_ports; ++p)
- if (lan966x->ports[p])
+ dir = vcap_debugfs(lan966x->dev, lan966x->debugfs_root, ctrl);
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (lan966x->ports[p]) {
+ vcap_port_debugfs(lan966x->dev, dir, ctrl,
+ lan966x->ports[p]->dev);
+
lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(true),
ANA_VCAP_S2_CFG_ENA, lan966x,
ANA_VCAP_S2_CFG(lan966x->ports[p]->chip_port));
+ }
+ }
lan966x->vcap_ctrl = ctrl;
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index d0ed7090aa54..1cb1cc3f1a85 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -9,7 +9,8 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \
- sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o sparx5_tc_matchall.o
+ sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o \
+ sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o sparx5_psfp.o
sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 3c5d4fe99373..c213a4414e65 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -198,12 +198,14 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
{ TARGET_QSYS, 0x110a0000, 2 }, /* 0x6110a0000 */
{ TARGET_QFWD, 0x110b0000, 2 }, /* 0x6110b0000 */
{ TARGET_XQS, 0x110c0000, 2 }, /* 0x6110c0000 */
+ { TARGET_VCAP_ES2, 0x110d0000, 2 }, /* 0x6110d0000 */
{ TARGET_CLKGEN, 0x11100000, 2 }, /* 0x611100000 */
{ TARGET_ANA_AC_POL, 0x11200000, 2 }, /* 0x611200000 */
{ TARGET_QRES, 0x11280000, 2 }, /* 0x611280000 */
{ TARGET_EACL, 0x112c0000, 2 }, /* 0x6112c0000 */
{ TARGET_ANA_CL, 0x11400000, 2 }, /* 0x611400000 */
{ TARGET_ANA_L3, 0x11480000, 2 }, /* 0x611480000 */
+ { TARGET_ANA_AC_SDLB, 0x11500000, 2 }, /* 0x611500000 */
{ TARGET_HSCH, 0x11580000, 2 }, /* 0x611580000 */
{ TARGET_REW, 0x11600000, 2 }, /* 0x611600000 */
{ TARGET_ANA_L2, 0x11800000, 2 }, /* 0x611800000 */
@@ -500,8 +502,8 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5)
clk_period = sparx5_clk_period(freq);
- spx5_rmw(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(clk_period / 100),
- HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS,
+ spx5_rmw(HSCH_SYS_CLK_PER_100PS_SET(clk_period / 100),
+ HSCH_SYS_CLK_PER_100PS,
sparx5,
HSCH_SYS_CLK_PER);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 4a574cdcb584..72e7928912eb 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -396,6 +396,7 @@ int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
struct sk_buff *skb);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
+int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
/* sparx5_vcap_impl.c */
int sparx5_vcap_init(struct sparx5 *sparx5);
@@ -413,6 +414,129 @@ int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+/* sparx5_pool.c */
+struct sparx5_pool_entry {
+ u16 ref_cnt;
+ u32 idx; /* tc index */
+};
+
+u32 sparx5_pool_idx_to_id(u32 idx);
+int sparx5_pool_put(struct sparx5_pool_entry *pool, int size, u32 id);
+int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id);
+int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
+ u32 *id);
+
+/* sparx5_sdlb.c */
+#define SPX5_SDLB_PUP_TOKEN_DISABLE 0x1FFF
+#define SPX5_SDLB_PUP_TOKEN_MAX (SPX5_SDLB_PUP_TOKEN_DISABLE - 1)
+#define SPX5_SDLB_GROUP_RATE_MAX 25000000000ULL
+#define SPX5_SDLB_2CYCLES_TYPE2_THRES_OFFSET 13
+#define SPX5_SDLB_CNT 4096
+#define SPX5_SDLB_GROUP_CNT 10
+#define SPX5_CLK_PER_100PS_DEFAULT 16
+
+struct sparx5_sdlb_group {
+ u64 max_rate;
+ u32 min_burst;
+ u32 frame_size;
+ u32 pup_interval;
+ u32 nsets;
+};
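+
+/* A group collects service leaky buckets with similar rates; max_rate,
+ * min_burst and frame_size are fixed per group (see sdlb_groups[] in
+ * sparx5_sdlb.c), while pup_interval and nsets are derived from them when
+ * the group is initialized.
+ */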
+
+extern struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT];
+int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval,
+ u64 rate);
+
+int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5);
+int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst);
+int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group);
+
+int sparx5_sdlb_group_add(struct sparx5 *sparx5, u32 group, u32 idx);
+int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx);
+
+void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
+ u32 frame_size, u32 idx);
+
+/* sparx5_police.c */
+enum {
+ /* More policer types will be added later */
+ SPX5_POL_SERVICE
+};
+
+struct sparx5_policer {
+ u32 type;
+ u32 idx;
+ u64 rate;
+ u32 burst;
+ u32 group;
+ u8 event_mask;
+};
+
+int sparx5_policer_conf_set(struct sparx5 *sparx5, struct sparx5_policer *pol);
+
+/* sparx5_psfp.c */
+#define SPX5_PSFP_GCE_CNT 4
+#define SPX5_PSFP_SG_CNT 1024
+#define SPX5_PSFP_SG_MIN_CYCLE_TIME_NS (1 * NSEC_PER_USEC)
+#define SPX5_PSFP_SG_MAX_CYCLE_TIME_NS ((1 * NSEC_PER_SEC) - 1)
+#define SPX5_PSFP_SG_MAX_IPV (SPX5_PRIOS - 1)
+#define SPX5_PSFP_SG_OPEN (SPX5_PSFP_SG_CNT - 1)
+#define SPX5_PSFP_SG_CYCLE_TIME_DEFAULT 1000000
+#define SPX5_PSFP_SF_MAX_SDU 16383
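+/* The TSN_MAX_SDU register field is 14 bits wide (GENMASK(15, 2)), which
+ * gives the 16383 byte limit above.
+ */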
+
+struct sparx5_psfp_fm {
+ struct sparx5_policer pol;
+};
+
+struct sparx5_psfp_gce {
+ bool gate_state; /* StreamGateState */
+ u32 interval; /* TimeInterval */
+ u32 ipv; /* InternalPriorityValue */
+ u32 maxoctets; /* IntervalOctetMax */
+};
+
+struct sparx5_psfp_sg {
+ bool gate_state; /* PSFPAdminGateStates */
+ bool gate_enabled; /* PSFPGateEnabled */
+ u32 ipv; /* PSFPAdminIPV */
+ struct timespec64 basetime; /* PSFPAdminBaseTime */
+ u32 cycletime; /* PSFPAdminCycleTime */
+ u32 cycletimeext; /* PSFPAdminCycleTimeExtension */
+ u32 num_entries; /* PSFPAdminControlListLength */
+ struct sparx5_psfp_gce gce[SPX5_PSFP_GCE_CNT];
+};
+
+struct sparx5_psfp_sf {
+ bool sblock_osize_ena;
+ bool sblock_osize;
+ u32 max_sdu;
+ u32 sgid; /* Gate id */
+ u32 fmid; /* Flow meter id */
+};
+
+int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_fm *fm, u32 *id);
+int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id);
+
+int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_sg *sg, u32 *id);
+int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id);
+
+int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
+ u32 *id);
+int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id);
+
+u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx);
+u32 sparx5_psfp_isdx_get_fm(struct sparx5 *sparx5, u32 isdx);
+u32 sparx5_psfp_sf_get_sg(struct sparx5 *sparx5, u32 sfid);
+void sparx5_isdx_conf_set(struct sparx5 *sparx5, u32 isdx, u32 sfid, u32 fmid);
+
+void sparx5_psfp_init(struct sparx5 *sparx5);
+
+/* sparx5_qos.c */
+void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
+ const ktime_t org_base_time, ktime_t *new_base_time);
+
/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index e3bf0460333d..bd73742939d3 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -4,8 +4,8 @@
* Copyright (c) 2021 Microchip Technology Inc.
*/
-/* This file is autogenerated by cml-utils 2022-12-06 15:28:38 +0100.
- * Commit ID: 3db2ac730f134c160496f2b9f10915e347d871cb
+/* This file is autogenerated by cml-utils 2023-01-17 17:04:43 +0100.
+ * Commit ID: cc027a9bd71002aebf074df5ad8584fe1545e05e
*/
#ifndef _SPARX5_MAIN_REGS_H_
@@ -19,6 +19,7 @@ enum sparx5_target {
TARGET_ANA_AC = 1,
TARGET_ANA_ACL = 2,
TARGET_ANA_AC_POL = 4,
+ TARGET_ANA_AC_SDLB = 5,
TARGET_ANA_CL = 6,
TARGET_ANA_L2 = 7,
TARGET_ANA_L3 = 8,
@@ -46,6 +47,7 @@ enum sparx5_target {
TARGET_QS = 177,
TARGET_QSYS = 178,
TARGET_REW = 179,
+ TARGET_VCAP_ES2 = 324,
TARGET_VCAP_SUPER = 326,
TARGET_VOP = 327,
TARGET_XQS = 331,
@@ -129,6 +131,254 @@ enum sparx5_target {
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\
FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+/* ANA_AC:TSN_SF:TSN_SF */
+#define ANA_AC_TSN_SF \
+ __REG(TARGET_ANA_AC, 0, 1, 839136, 0, 1, 4, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY BIT(9)
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
+
+#define ANA_AC_TSN_SF_PORT_NUM GENMASK(8, 0)
+#define ANA_AC_TSN_SF_PORT_NUM_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_PORT_NUM, x)
+#define ANA_AC_TSN_SF_PORT_NUM_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_PORT_NUM, x)
+
+/* ANA_AC:TSN_SF_CFG:TSN_SF_CFG */
+#define ANA_AC_TSN_SF_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, 839680, g, 1024, 4, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_CFG_TSN_SGID GENMASK(25, 16)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU GENMASK(15, 2)
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_MAX_SDU, x)
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_MAX_SDU, x)
+
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA BIT(1)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_SET(x) \
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA, x)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_GET(x) \
+ FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA, x)
+
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE BIT(0)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_SET(x) \
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_GET(x) \
+ FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
+
+/* ANA_AC:TSN_SF_STATUS:TSN_SF_STATUS */
+#define ANA_AC_TSN_SF_STATUS \
+ __REG(TARGET_ANA_AC, 0, 1, 839072, 0, 1, 16, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN GENMASK(25, 12)
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_FRM_LEN, x)
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_FRM_LEN, x)
+
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP BIT(11)
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
+
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID GENMASK(10, 1)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD BIT(0)
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
+
+/* ANA_AC:SG_ACCESS:SG_ACCESS_CTRL */
+#define ANA_AC_SG_ACCESS_CTRL \
+ __REG(TARGET_ANA_AC, 0, 1, 839140, 0, 1, 12, 0, 0, 1, 4)
+
+#define ANA_AC_SG_ACCESS_CTRL_SGID GENMASK(9, 0)
+#define ANA_AC_SG_ACCESS_CTRL_SGID_SET(x)\
+ FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+#define ANA_AC_SG_ACCESS_CTRL_SGID_GET(x)\
+ FIELD_GET(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(x)\
+ FIELD_GET(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
+
+/* ANA_AC:SG_ACCESS:SG_CYCLETIME_UPDATE_PERIOD */
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD \
+ __REG(TARGET_ANA_AC, 0, 1, 839140, 0, 1, 12, 8, 0, 1, 4)
+
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS GENMASK(15, 0)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS, x)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_GET(x)\
+ FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS, x)
+
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA BIT(31)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_1 */
+#define ANA_AC_SG_CONFIG_REG_1 \
+ __REG(TARGET_ANA_AC, 0, 1, 851584, 0, 1, 128, 48, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_2 */
+#define ANA_AC_SG_CONFIG_REG_2 \
+ __REG(TARGET_ANA_AC, 0, 1, 851584, 0, 1, 128, 52, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_3 */
+#define ANA_AC_SG_CONFIG_REG_3 \
+ __REG(TARGET_ANA_AC, 0, 1, 851584, 0, 1, 128, 56, 0, 1, 4)
+
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB GENMASK(15, 0)
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB, x)
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH GENMASK(18, 16)
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH, x)
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE, x)
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS GENMASK(24, 21)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INIT_IPS, x)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INIT_IPS, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(25)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE, x)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA BIT(26)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA, x)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX BIT(27)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INVALID_RX, x)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INVALID_RX, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA BIT(28)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA, x)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED BIT(29)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_4 */
+#define ANA_AC_SG_CONFIG_REG_4 \
+ __REG(TARGET_ANA_AC, 0, 1, 851584, 0, 1, 128, 60, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_5 */
+#define ANA_AC_SG_CONFIG_REG_5 \
+ __REG(TARGET_ANA_AC, 0, 1, 851584, 0, 1, 128, 64, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_GS_CONFIG */
+#define ANA_AC_SG_GCL_GS_CONFIG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, 851584, 0, 1, 128, 0, r, 4, 4)
+
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS GENMASK(3, 0)
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_GCL_GS_CONFIG_IPS, x)
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_IPS, x)
+
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_TI_CONFIG */
+#define ANA_AC_SG_GCL_TI_CONFIG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, 851584, 0, 1, 128, 16, r, 4, 4)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_OCT_CONFIG */
+#define ANA_AC_SG_GCL_OCT_CONFIG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, 851584, 0, 1, 128, 32, r, 4, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_1 */
+#define ANA_AC_SG_STATUS_REG_1 \
+ __REG(TARGET_ANA_AC, 0, 1, 839088, 0, 1, 16, 0, 0, 1, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_2 */
+#define ANA_AC_SG_STATUS_REG_2 \
+ __REG(TARGET_ANA_AC, 0, 1, 839088, 0, 1, 16, 4, 0, 1, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_3 */
+#define ANA_AC_SG_STATUS_REG_3 \
+ __REG(TARGET_ANA_AC, 0, 1, 839088, 0, 1, 16, 8, 0, 1, 4)
+
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB GENMASK(15, 0)
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB, x)
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB, x)
+
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE BIT(16)
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_GATE_STATE, x)
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_GATE_STATE, x)
+
+#define ANA_AC_SG_STATUS_REG_3_IPS GENMASK(23, 20)
+#define ANA_AC_SG_STATUS_REG_3_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_IPS, x)
+#define ANA_AC_SG_STATUS_REG_3_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_IPS, x)
+
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING, x)
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING, x)
+
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX GENMASK(27, 25)
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_4 */
+#define ANA_AC_SG_STATUS_REG_4 \
+ __REG(TARGET_ANA_AC, 0, 1, 839088, 0, 1, 16, 12, 0, 1, 4)
+
/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
@@ -567,6 +817,232 @@ enum sparx5_target {
#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+/* ANA_AC_SDLB:LBGRP_TBL:XLB_START */
+#define ANA_AC_SDLB_XLB_START(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 295468, g, 10, 24, 0, 0, 1, 4)
+
+#define ANA_AC_SDLB_XLB_START_LBSET_START GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_START_LBSET_START_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+#define ANA_AC_SDLB_XLB_START_LBSET_START_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_INTERVAL */
+#define ANA_AC_SDLB_PUP_INTERVAL(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 295468, g, 10, 24, 4, 0, 1, 4)
+
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL GENMASK(19, 0)
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_CTRL */
+#define ANA_AC_SDLB_PUP_CTRL(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 295468, g, 10, 24, 8, 0, 1, 4)
+
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT GENMASK(18, 0)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT, x)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT, x)
+
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA BIT(24)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_MISC */
+#define ANA_AC_SDLB_LBGRP_MISC(g)\
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 295468, g, 10, 24, 12, 0, 1, 4)
+
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT GENMASK(12, 8)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:FRM_RATE_TOKENS */
+#define ANA_AC_SDLB_FRM_RATE_TOKENS(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 295468, g, 10, 24, 16, 0, 1, 4)
+
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS GENMASK(12, 0)
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_STATE_TBL */
+#define ANA_AC_SDLB_LBGRP_STATE_TBL(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 295468, g, 10, 24, 20, 0, 1, 4)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING BIT(0)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING, x)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK BIT(1)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT GENMASK(28, 16)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:PUP_TOKENS */
+#define ANA_AC_SDLB_PUP_TOKENS(g, r) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, 4616, 64, 0, r, 2, 4)
+
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS GENMASK(12, 0)
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:THRES */
+#define ANA_AC_SDLB_THRES(g, r) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, 4616, 64, 8, r, 2, 4)
+
+#define ANA_AC_SDLB_THRES_THRES GENMASK(9, 0)
+#define ANA_AC_SDLB_THRES_THRES_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_THRES_THRES, x)
+#define ANA_AC_SDLB_THRES_THRES_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_THRES_THRES, x)
+
+#define ANA_AC_SDLB_THRES_THRES_HYS GENMASK(25, 16)
+#define ANA_AC_SDLB_THRES_THRES_HYS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_THRES_THRES_HYS, x)
+#define ANA_AC_SDLB_THRES_THRES_HYS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_THRES_THRES_HYS, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:XLB_NEXT */
+#define ANA_AC_SDLB_XLB_NEXT(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, 4616, 64, 16, 0, 1, 4)
+
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP GENMASK(27, 24)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:INH_CTRL */
+#define ANA_AC_SDLB_INH_CTRL(g, r) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, 4616, 64, 20, r, 2, 4)
+
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, x)
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, x)
+
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE GENMASK(21, 20)
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_INH_MODE, x)
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_MODE, x)
+
+#define ANA_AC_SDLB_INH_CTRL_INH_LB BIT(24)
+#define ANA_AC_SDLB_INH_CTRL_INH_LB_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
+#define ANA_AC_SDLB_INH_CTRL_INH_LB_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:INH_LBSET_ADDR */
+#define ANA_AC_SDLB_INH_LBSET_ADDR(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, 4616, 64, 28, 0, 1, 4)
+
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:DLB_MISC */
+#define ANA_AC_SDLB_DLB_MISC(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, 4616, 64, 32, 0, 1, 4)
+
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA BIT(0)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA, x)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA, x)
+
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA BIT(6)
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA, x)
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA, x)
+
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ GENMASK(14, 8)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:DLB_CFG */
+#define ANA_AC_SDLB_DLB_CFG(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, 4616, 64, 36, 0, 1, 4)
+
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA BIT(11)
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA, x)
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA, x)
+
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL GENMASK(10, 9)
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL, x)
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS BIT(8)
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS, x)
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS, x)
+
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS BIT(7)
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS, x)
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS, x)
+
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL GENMASK(6, 5)
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL, x)
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL GENMASK(4, 3)
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL, x)
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE BIT(2)
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DLB_MODE, x)
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DLB_MODE, x)
+
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK GENMASK(1, 0)
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
+
/* ANA_CL:PORT:FILTER_CTRL */
#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
@@ -955,6 +1431,82 @@ enum sparx5_target {
#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_GET(x)\
FIELD_GET(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x)
+/* ANA_L2:COMMON:FWD_CFG */
+#define ANA_L2_FWD_CFG \
+ __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 0, 0, 1, 4)
+
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL GENMASK(21, 20)
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL, x)
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL, x)
+
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA BIT(18)
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA, x)
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA, x)
+
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA BIT(17)
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA, x)
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA, x)
+
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA BIT(16)
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, x)
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, x)
+
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU GENMASK(10, 8)
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_CPU_DMAC_QU, x)
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_CPU_DMAC_QU, x)
+
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA BIT(7)
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_LOOPBACK_ENA, x)
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_LOOPBACK_ENA, x)
+
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA BIT(6)
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA, x)
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA, x)
+
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL BIT(4)
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FILTER_MODE_SEL, x)
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FILTER_MODE_SEL, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA BIT(3)
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA BIT(2)
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA BIT(1)
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA, x)
+
+#define ANA_L2_FWD_CFG_FWD_ENA BIT(0)
+#define ANA_L2_FWD_CFG_FWD_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FWD_ENA, x)
+#define ANA_L2_FWD_CFG_FWD_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FWD_ENA, x)
+
/* ANA_L2:COMMON:AUTO_LRN_CFG */
#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
@@ -979,6 +1531,26 @@ enum sparx5_target {
#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+/* ANA_L2:ISDX:DLB_CFG */
+#define ANA_L2_DLB_CFG(g) \
+ __REG(TARGET_ANA_L2, 0, 1, 0, g, 4096, 128, 56, 0, 1, 4)
+
+#define ANA_L2_DLB_CFG_DLB_IDX GENMASK(12, 0)
+#define ANA_L2_DLB_CFG_DLB_IDX_SET(x)\
+ FIELD_PREP(ANA_L2_DLB_CFG_DLB_IDX, x)
+#define ANA_L2_DLB_CFG_DLB_IDX_GET(x)\
+ FIELD_GET(ANA_L2_DLB_CFG_DLB_IDX, x)
+
+/* ANA_L2:ISDX:TSN_CFG */
+#define ANA_L2_TSN_CFG(g) \
+ __REG(TARGET_ANA_L2, 0, 1, 0, g, 4096, 128, 100, 0, 1, 4)
+
+#define ANA_L2_TSN_CFG_TSN_SFID GENMASK(9, 0)
+#define ANA_L2_TSN_CFG_TSN_SFID_SET(x)\
+ FIELD_PREP(ANA_L2_TSN_CFG_TSN_SFID, x)
+#define ANA_L2_TSN_CFG_TSN_SFID_GET(x)\
+ FIELD_GET(ANA_L2_TSN_CFG_TSN_SFID, x)
+
/* ANA_L3:COMMON:VLAN_CTRL */
#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3, 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
@@ -3120,6 +3692,36 @@ enum sparx5_target {
#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\
FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+/* EACL:ES2_KEY_SELECT_PROFILE:VCAP_ES2_KEY_SEL */
+#define EACL_VCAP_ES2_KEY_SEL(g, r) __REG(TARGET_EACL, 0, 1, 149504, g, 138, 8, 0, r, 2, 4)
+
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL GENMASK(7, 5)
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL GENMASK(4, 2)
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL BIT(1)
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA BIT(0)
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
+
+/* EACL:CNT_TBL:ES2_CNT */
+#define EACL_ES2_CNT(g) __REG(TARGET_EACL, 0, 1, 122880, g, 2048, 4, 0, 0, 1, 4)
+
/* EACL:POL_CFG:POL_EACL_CFG */
#define EACL_POL_EACL_CFG __REG(TARGET_EACL, 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
@@ -3159,6 +3761,57 @@ enum sparx5_target {
#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\
FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+/* EACL:ES2_STICKY:SEC_LOOKUP_STICKY */
+#define EACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_EACL, 0, 1, 118696, 0, 1, 8, 0, r, 2, 4)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY BIT(7)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY BIT(6)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY BIT(5)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY BIT(4)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY BIT(3)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY BIT(2)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY BIT(1)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY BIT(0)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+
/* EACL:RAM_CTRL:RAM_INIT */
#define EACL_RAM_INIT __REG(TARGET_EACL, 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
@@ -3654,11 +4307,11 @@ enum sparx5_target {
/* HSCH:HSCH_MISC:SYS_CLK_PER */
#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
-#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS GENMASK(7, 0)
-#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(x)\
- FIELD_PREP(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
-#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
- FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+#define HSCH_SYS_CLK_PER_100PS GENMASK(7, 0)
+#define HSCH_SYS_CLK_PER_100PS_SET(x)\
+ FIELD_PREP(HSCH_SYS_CLK_PER_100PS, x)
+#define HSCH_SYS_CLK_PER_100PS_GET(x)\
+ FIELD_GET(HSCH_SYS_CLK_PER_100PS, x)
/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
@@ -5612,6 +6265,147 @@ enum sparx5_target {
#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x)
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES2_CTRL __REG(TARGET_VCAP_ES2, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES2_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_ES2_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_CMD, x)
+#define VCAP_ES2_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_CMD, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_ES2_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ADDR, x)
+#define VCAP_ES2_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_ES2_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_ES2_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_SHOT, x)
+#define VCAP_ES2_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_ES2_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_ES2_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_CLEAR_CACHE, x)
+#define VCAP_ES2_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES2_CFG __REG(TARGET_VCAP_ES2, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES2_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_ES2_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CFG_MV_NUM_POS, x)
+#define VCAP_ES2_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_ES2_CFG_MV_NUM_POS, x)
+
+#define VCAP_ES2_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_ES2_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_ES2_CFG_MV_SIZE, x)
+#define VCAP_ES2_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_ES2_CFG_MV_SIZE, x)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES2_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES2_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES2_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES2_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES2_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES2_VCAP_TG_DAT __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES2_IDX __REG(TARGET_VCAP_ES2, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES2_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_ES2_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_ES2_IDX_CORE_IDX, x)
+#define VCAP_ES2_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_ES2_IDX_CORE_IDX, x)
+
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES2_MAP __REG(TARGET_VCAP_ES2, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES2_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_ES2_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_ES2_MAP_CORE_MAP, x)
+#define VCAP_ES2_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_ES2_MAP_CORE_MAP, x)
+
+/* VCAP_ES2:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES2_VCAP_STICKY __REG(TARGET_VCAP_ES2, 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
+ FIELD_PREP(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
+ FIELD_GET(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+
+/* VCAP_ES2:VCAP_CONST:VCAP_VER */
+#define VCAP_ES2_VCAP_VER __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES2_ENTRY_WIDTH __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES2_ENTRY_CNT __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES2_ENTRY_SWCNT __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES2_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES2_ACTION_DEF_CNT __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES2_ACTION_WIDTH __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES2_CNT_WIDTH __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:CORE_CNT */
+#define VCAP_ES2_CORE_CNT __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:IF_CNT */
+#define VCAP_ES2_IF_CNT __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+
/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_police.c b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
new file mode 100644
index 000000000000..8ada5cee1342
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static int sparx5_policer_service_conf_set(struct sparx5 *sparx5,
+ struct sparx5_policer *pol)
+{
+ u32 idx, pup_tokens, max_pup_tokens, burst, thres;
+ struct sparx5_sdlb_group *g;
+ u64 rate;
+
+ g = &sdlb_groups[pol->group];
+ idx = pol->idx;
+
+ rate = pol->rate * 1000;
+ burst = pol->burst;
+
+ pup_tokens = sparx5_sdlb_pup_token_get(sparx5, g->pup_interval, rate);
+ max_pup_tokens =
+ sparx5_sdlb_pup_token_get(sparx5, g->pup_interval, g->max_rate);
+
+ thres = DIV_ROUND_UP(burst, g->min_burst);
+
+ spx5_wr(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(pup_tokens), sparx5,
+ ANA_AC_SDLB_PUP_TOKENS(idx, 0));
+
+ spx5_rmw(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(max_pup_tokens),
+ ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, sparx5,
+ ANA_AC_SDLB_INH_CTRL(idx, 0));
+
+ spx5_rmw(ANA_AC_SDLB_THRES_THRES_SET(thres), ANA_AC_SDLB_THRES_THRES,
+ sparx5, ANA_AC_SDLB_THRES(idx, 0));
+
+ return 0;
+}
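+
+/* Worked example for the threshold above: with a group whose min_burst is
+ * 4096 bytes (e.g. the 1 Gbps group in sdlb_groups[]), a requested burst of
+ * 10000 bytes gives thres = DIV_ROUND_UP(10000, 4096) = 3, i.e. thresholds
+ * are programmed in units of the group's minimum burst size.
+ */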
+
+int sparx5_policer_conf_set(struct sparx5 *sparx5, struct sparx5_policer *pol)
+{
+ /* More policer types will be added later */
+ switch (pol->type) {
+ case SPX5_POL_SERVICE:
+ return sparx5_policer_service_conf_set(sparx5, pol);
+ default:
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c
new file mode 100644
index 000000000000..b4b280c6138b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static u32 sparx5_pool_id_to_idx(u32 id)
+{
+ return --id;
+}
+
+u32 sparx5_pool_idx_to_id(u32 idx)
+{
+ return ++idx;
+}
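+
+/* Ids handed out to callers are one-based while pool indices are zero-based,
+ * so id 0 is never a valid resource: sparx5_pool_idx_to_id(0) == 1 and
+ * sparx5_pool_id_to_idx(1) == 0.
+ */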
+
+/* Release resource from pool.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_put(struct sparx5_pool_entry *pool, int size, u32 id)
+{
+ struct sparx5_pool_entry *e_itr;
+
+ e_itr = (pool + sparx5_pool_id_to_idx(id));
+ if (e_itr->ref_cnt == 0)
+ return -EINVAL;
+
+ return --e_itr->ref_cnt;
+}
+
+/* Get resource from pool.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id)
+{
+ struct sparx5_pool_entry *e_itr;
+ int i;
+
+ for (i = 0, e_itr = pool; i < size; i++, e_itr++) {
+ if (e_itr->ref_cnt == 0) {
+ *id = sparx5_pool_idx_to_id(i);
+ return ++e_itr->ref_cnt;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/* Get resource from pool that matches index.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
+ u32 *id)
+{
+ struct sparx5_pool_entry *e_itr;
+ int i, ret = -ENOSPC;
+
+ for (i = 0, e_itr = pool; i < size; i++, e_itr++) {
+ /* Pool index of first free entry */
+ if (e_itr->ref_cnt == 0 && ret == -ENOSPC)
+ ret = i;
+ /* TC index already in use? */
+ if (e_itr->idx == idx && e_itr->ref_cnt > 0) {
+ ret = i;
+ break;
+ }
+ }
+
+ /* Did we find a free entry? */
+ if (ret >= 0) {
+ *id = sparx5_pool_idx_to_id(ret);
+ e_itr = (pool + ret);
+ e_itr->idx = idx;
+ return ++e_itr->ref_cnt;
+ }
+
+ return ret;
+}
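+
+/* Usage sketch: a first sparx5_pool_get_with_idx(pool, size, idx, &id) claims
+ * a free entry, binds it to the tc index and returns 1; a later call with the
+ * same idx finds that entry and returns 2, and each sparx5_pool_put() then
+ * drops one reference until the entry is free again.
+ */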
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
new file mode 100644
index 000000000000..8dee1ab1fa75
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define SPX5_PSFP_SF_CNT 1024
+#define SPX5_PSFP_SG_CONFIG_CHANGE_SLEEP 1000
+#define SPX5_PSFP_SG_CONFIG_CHANGE_TIMEO 100000
+
+/* Pool of available service policers */
+static struct sparx5_pool_entry sparx5_psfp_fm_pool[SPX5_SDLB_CNT];
+
+/* Pool of available stream gates */
+static struct sparx5_pool_entry sparx5_psfp_sg_pool[SPX5_PSFP_SG_CNT];
+
+/* Pool of available stream filters */
+static struct sparx5_pool_entry sparx5_psfp_sf_pool[SPX5_PSFP_SF_CNT];
+
+static int sparx5_psfp_sf_get(u32 *id)
+{
+ return sparx5_pool_get(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+}
+
+static int sparx5_psfp_sf_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+}
+
+static int sparx5_psfp_sg_get(u32 idx, u32 *id)
+{
+ return sparx5_pool_get_with_idx(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT,
+ idx, id);
+}
+
+static int sparx5_psfp_sg_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT, id);
+}
+
+static int sparx5_psfp_fm_get(u32 idx, u32 *id)
+{
+ return sparx5_pool_get_with_idx(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, idx,
+ id);
+}
+
+static int sparx5_psfp_fm_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, id);
+}
+
+u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx)
+{
+ return ANA_L2_TSN_CFG_TSN_SFID_GET(spx5_rd(sparx5,
+ ANA_L2_TSN_CFG(isdx)));
+}
+
+u32 sparx5_psfp_isdx_get_fm(struct sparx5 *sparx5, u32 isdx)
+{
+ return ANA_L2_DLB_CFG_DLB_IDX_GET(spx5_rd(sparx5,
+ ANA_L2_DLB_CFG(isdx)));
+}
+
+u32 sparx5_psfp_sf_get_sg(struct sparx5 *sparx5, u32 sfid)
+{
+ return ANA_AC_TSN_SF_CFG_TSN_SGID_GET(spx5_rd(sparx5,
+ ANA_AC_TSN_SF_CFG(sfid)));
+}
+
+void sparx5_isdx_conf_set(struct sparx5 *sparx5, u32 isdx, u32 sfid, u32 fmid)
+{
+ spx5_rmw(ANA_L2_TSN_CFG_TSN_SFID_SET(sfid), ANA_L2_TSN_CFG_TSN_SFID,
+ sparx5, ANA_L2_TSN_CFG(isdx));
+
+ spx5_rmw(ANA_L2_DLB_CFG_DLB_IDX_SET(fmid), ANA_L2_DLB_CFG_DLB_IDX,
+ sparx5, ANA_L2_DLB_CFG(isdx));
+}
+
+/* Internal priority value to internal priority selector */
+static u32 sparx5_psfp_ipv_to_ips(s32 ipv)
+{
+ return ipv > 0 ? (ipv | BIT(3)) : 0;
+}
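+
+/* E.g. ipv 3 maps to ips 0xb: BIT(3) marks the selector as valid and the low
+ * bits carry the priority, while ipv <= 0 leaves the selector disabled.
+ */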
+
+static int sparx5_psfp_sgid_get_status(struct sparx5 *sparx5)
+{
+ return spx5_rd(sparx5, ANA_AC_SG_ACCESS_CTRL);
+}
+
+static int sparx5_psfp_sgid_wait_for_completion(struct sparx5 *sparx5)
+{
+ u32 val;
+
+ return readx_poll_timeout(sparx5_psfp_sgid_get_status, sparx5, val,
+ !ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(val),
+ SPX5_PSFP_SG_CONFIG_CHANGE_SLEEP,
+ SPX5_PSFP_SG_CONFIG_CHANGE_TIMEO);
+}
+
+static void sparx5_psfp_sg_config_change(struct sparx5 *sparx5, u32 id)
+{
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_SGID_SET(id), sparx5,
+ ANA_AC_SG_ACCESS_CTRL);
+
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(1) |
+ ANA_AC_SG_ACCESS_CTRL_SGID_SET(id),
+ sparx5, ANA_AC_SG_ACCESS_CTRL);
+
+ if (sparx5_psfp_sgid_wait_for_completion(sparx5) < 0)
+ pr_debug("%s:%d timed out waiting for sgid completion",
+ __func__, __LINE__);
+}
+
+static void sparx5_psfp_sf_set(struct sparx5 *sparx5, u32 id,
+ const struct sparx5_psfp_sf *sf)
+{
+ /* Configure stream filter */
+ spx5_rmw(ANA_AC_TSN_SF_CFG_TSN_SGID_SET(sf->sgid) |
+ ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(sf->max_sdu) |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_SET(sf->sblock_osize) |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_SET(sf->sblock_osize_ena),
+ ANA_AC_TSN_SF_CFG_TSN_SGID | ANA_AC_TSN_SF_CFG_TSN_MAX_SDU |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA,
+ sparx5, ANA_AC_TSN_SF_CFG(id));
+}
+
+static int sparx5_psfp_sg_set(struct sparx5 *sparx5, u32 id,
+ const struct sparx5_psfp_sg *sg)
+{
+ u32 ips, base_lsb, base_msb, accum_time_interval = 0;
+ const struct sparx5_psfp_gce *gce;
+ int i;
+
+ ips = sparx5_psfp_ipv_to_ips(sg->ipv);
+ base_lsb = sg->basetime.tv_sec & 0xffffffff;
+ base_msb = sg->basetime.tv_sec >> 32;
+
+ /* Set stream gate id */
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_SGID_SET(id), sparx5,
+ ANA_AC_SG_ACCESS_CTRL);
+
+ /* Write AdminPSFP values */
+ spx5_wr(sg->basetime.tv_nsec, sparx5, ANA_AC_SG_CONFIG_REG_1);
+ spx5_wr(base_lsb, sparx5, ANA_AC_SG_CONFIG_REG_2);
+
+ spx5_rmw(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(base_msb) |
+ ANA_AC_SG_CONFIG_REG_3_INIT_IPS_SET(ips) |
+ ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_SET(sg->num_entries) |
+ ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_SET(sg->gate_state) |
+ ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_SET(1),
+ ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB |
+ ANA_AC_SG_CONFIG_REG_3_INIT_IPS |
+ ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH |
+ ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE,
+ sparx5, ANA_AC_SG_CONFIG_REG_3);
+
+ spx5_wr(sg->cycletime, sparx5, ANA_AC_SG_CONFIG_REG_4);
+ spx5_wr(sg->cycletimeext, sparx5, ANA_AC_SG_CONFIG_REG_5);
+
+ /* For each scheduling entry */
+ for (i = 0; i < sg->num_entries; i++) {
+ gce = &sg->gce[i];
+ ips = sparx5_psfp_ipv_to_ips(gce->ipv);
+ /* Hardware needs TimeInterval to be cumulative */
+ accum_time_interval += gce->interval;
+ /* Set gate state */
+ spx5_wr(ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(ips) |
+ ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_SET(gce->gate_state),
+ sparx5, ANA_AC_SG_GCL_GS_CONFIG(i));
+
+ /* Set time interval */
+ spx5_wr(accum_time_interval, sparx5,
+ ANA_AC_SG_GCL_TI_CONFIG(i));
+
+ /* Set maximum octets */
+ spx5_wr(gce->maxoctets, sparx5, ANA_AC_SG_GCL_OCT_CONFIG(i));
+ }
+
+ return 0;
+}
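+
+/* Example of the cumulative TimeInterval programming above: a gate control
+ * list with intervals { 30000, 20000, 50000 } ns is written to hardware as
+ * { 30000, 50000, 100000 }, each entry holding the offset from cycle start
+ * at which its interval ends.
+ */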
+
+static int sparx5_sdlb_conf_set(struct sparx5 *sparx5,
+ struct sparx5_psfp_fm *fm)
+{
+ int (*sparx5_sdlb_group_action)(struct sparx5 *sparx5, u32 group,
+ u32 idx);
+
+ if (!fm->pol.rate && !fm->pol.burst)
+ sparx5_sdlb_group_action = &sparx5_sdlb_group_del;
+ else
+ sparx5_sdlb_group_action = &sparx5_sdlb_group_add;
+
+ sparx5_policer_conf_set(sparx5, &fm->pol);
+
+ return sparx5_sdlb_group_action(sparx5, fm->pol.group, fm->pol.idx);
+}
+
+int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
+ u32 *id)
+{
+ int ret;
+
+ ret = sparx5_psfp_sf_get(id);
+ if (ret < 0)
+ return ret;
+
+ sparx5_psfp_sf_set(sparx5, *id, sf);
+
+ return 0;
+}
+
+int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id)
+{
+ const struct sparx5_psfp_sf sf = { 0 };
+
+ sparx5_psfp_sf_set(sparx5, id, &sf);
+
+ return sparx5_psfp_sf_put(id);
+}
+
+int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_sg *sg, u32 *id)
+{
+ ktime_t basetime;
+ int ret;
+
+ ret = sparx5_psfp_sg_get(uidx, id);
+ if (ret < 0)
+ return ret;
+ /* Was already in use, no need to reconfigure */
+ if (ret > 1)
+ return 0;
+
+ /* Calculate basetime for this stream gate */
+ sparx5_new_base_time(sparx5, sg->cycletime, 0, &basetime);
+ sg->basetime = ktime_to_timespec64(basetime);
+
+ sparx5_psfp_sg_set(sparx5, *id, sg);
+
+ /* Signal hardware to copy AdminPSFP values into OperPSFP values */
+ sparx5_psfp_sg_config_change(sparx5, *id);
+
+ return 0;
+}
+
+int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id)
+{
+ const struct sparx5_psfp_sg sg = { 0 };
+ int ret;
+
+ ret = sparx5_psfp_sg_put(id);
+ if (ret < 0)
+ return ret;
+ /* Stream gate still in use? */
+ if (ret > 0)
+ return 0;
+
+ return sparx5_psfp_sg_set(sparx5, id, &sg);
+}
+
+int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_fm *fm, u32 *id)
+{
+ struct sparx5_policer *pol = &fm->pol;
+ int ret;
+
+ /* Get flow meter */
+ ret = sparx5_psfp_fm_get(uidx, &fm->pol.idx);
+ if (ret < 0)
+ return ret;
+ /* Was already in use, no need to reconfigure */
+ if (ret > 1)
+ return 0;
+
+ ret = sparx5_sdlb_group_get_by_rate(sparx5, pol->rate, pol->burst);
+ if (ret < 0)
+ return ret;
+
+ fm->pol.group = ret;
+
+ ret = sparx5_sdlb_conf_set(sparx5, fm);
+ if (ret < 0)
+ return ret;
+
+ *id = fm->pol.idx;
+
+ return 0;
+}
+
+int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id)
+{
+ struct sparx5_psfp_fm fm = { .pol.idx = id,
+ .pol.type = SPX5_POL_SERVICE };
+ int ret;
+
+ /* Find the group that this lb belongs to */
+ ret = sparx5_sdlb_group_get_by_index(sparx5, id, &fm.pol.group);
+ if (ret < 0)
+ return ret;
+
+ ret = sparx5_psfp_fm_put(id);
+ if (ret < 0)
+ return ret;
+ /* Do not reset flow-meter if still in use. */
+ if (ret > 0)
+ return 0;
+
+ return sparx5_sdlb_conf_set(sparx5, &fm);
+}
+
+void sparx5_psfp_init(struct sparx5 *sparx5)
+{
+ const struct sparx5_sdlb_group *group;
+ int i;
+
+ for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
+ group = &sdlb_groups[i];
+ sparx5_sdlb_group_init(sparx5, group->max_rate,
+ group->min_burst, group->frame_size, i);
+ }
+
+ spx5_wr(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_SET(1),
+ sparx5, ANA_AC_SG_CYCLETIME_UPDATE_PERIOD);
+
+ spx5_rmw(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_SET(1),
+ ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, sparx5, ANA_L2_FWD_CFG);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 0ed1ea7727c5..0edb98cef7e4 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -476,8 +476,7 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
return 0;
}
-static int sparx5_ptp_gettime64(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
struct sparx5 *sparx5 = phc->sparx5;
@@ -633,7 +632,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5)
/* Enable master counters */
spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
- for (i = 0; i < sparx5->port_count; i++) {
+ for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
@@ -649,7 +648,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5)
struct sparx5_port *port;
int i;
- for (i = 0; i < sparx5->port_count; i++) {
+ for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
index 379e540e5e6a..5f34febaee6b 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
@@ -9,6 +9,63 @@
#include "sparx5_main.h"
#include "sparx5_qos.h"
+/* Calculate new base_time based on cycle_time.
+ *
+ * The hardware requires a base_time that is always in the future.
+ * We define threshold_time as current_time + (2 * cycle_time).
+ * If base_time is below threshold_time this function recalculates it to be in
+ * the interval:
+ * threshold_time <= base_time < (threshold_time + cycle_time)
+ *
+ * A very simple algorithm could be like this:
+ * new_base_time = org_base_time + N * cycle_time
+ * using the lowest N so that new_base_time >= threshold_time.
+ */
+void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
+ const ktime_t org_base_time, ktime_t *new_base_time)
+{
+ ktime_t current_time, threshold_time, new_time;
+ struct timespec64 ts;
+ u64 nr_of_cycles_p2;
+ u64 nr_of_cycles;
+ u64 diff_time;
+
+ new_time = org_base_time;
+
+ sparx5_ptp_gettime64(&sparx5->phc[SPARX5_PHC_PORT].info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ threshold_time = current_time + (2 * cycle_time);
+ diff_time = threshold_time - new_time;
+ nr_of_cycles = div_u64(diff_time, cycle_time);
+ nr_of_cycles_p2 = 1; /* Use 2^0 as start value */
+
+ if (new_time >= threshold_time) {
+ *new_base_time = new_time;
+ return;
+ }
+
+	/* Calculate the smallest power of 2 (nr_of_cycles_p2)
+	 * that is greater than or equal to nr_of_cycles.
+	 */
+ while (nr_of_cycles_p2 < nr_of_cycles)
+ nr_of_cycles_p2 <<= 1; /* Next (higher) power of 2 */
+
+	/* Add chunks of (power of 2 * cycle_time), as large as
+	 * possible, stepping down through the powers of 2
+	 */
+ while (nr_of_cycles_p2) {
+ if (new_time < threshold_time) {
+ new_time += cycle_time * nr_of_cycles_p2;
+ while (new_time < threshold_time)
+ new_time += cycle_time * nr_of_cycles_p2;
+ new_time -= cycle_time * nr_of_cycles_p2;
+ }
+ nr_of_cycles_p2 >>= 1; /* Next (lower) power of 2 */
+ }
+ new_time += cycle_time;
+ *new_base_time = new_time;
+}
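+
+/* Worked example (editor's illustration, values assumed): cycle_time =
+ * 1000000 ns, org_base_time = 0 and current_time = 10500000 ns give
+ * threshold_time = 12500000 ns, nr_of_cycles = 12 and a starting
+ * nr_of_cycles_p2 of 16. The descending power-of-2 loop leaves new_time at
+ * 12000000 ns and the final "+ cycle_time" yields 13000000 ns, i.e. the
+ * closed form:
+ *   org_base_time + ceil((threshold_time - org_base_time) / cycle_time)
+ *                   * cycle_time
+ * evaluated in O(log N) chunks instead of N single-cycle steps.
+ */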
+
/* Max rates for leak groups */
static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
1048568, /* 1.049 Gbps */
@@ -393,6 +450,8 @@ int sparx5_qos_init(struct sparx5 *sparx5)
if (ret < 0)
return ret;
+ sparx5_psfp_init(sparx5);
+
return 0;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
new file mode 100644
index 000000000000..f5267218caeb
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT] = {
+ { SPX5_SDLB_GROUP_RATE_MAX, 8192 / 1, 64 }, /* 25 G */
+ { 15000000000ULL, 8192 / 1, 64 }, /* 15 G */
+ { 10000000000ULL, 8192 / 1, 64 }, /* 10 G */
+ { 5000000000ULL, 8192 / 1, 64 }, /* 5 G */
+ { 2500000000ULL, 8192 / 1, 64 }, /* 2.5 G */
+ { 1000000000ULL, 8192 / 2, 64 }, /* 1 G */
+ { 500000000ULL, 8192 / 2, 64 }, /* 500 M */
+ { 100000000ULL, 8192 / 4, 64 }, /* 100 M */
+ { 50000000ULL, 8192 / 4, 64 }, /* 50 M */
+ { 5000000ULL, 8192 / 8, 64 } /* 5 M */
+};
+
+int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5)
+{
+ u32 clk_per_100ps;
+ u64 clk_hz;
+
+ clk_per_100ps = HSCH_SYS_CLK_PER_100PS_GET(spx5_rd(sparx5,
+ HSCH_SYS_CLK_PER));
+ if (!clk_per_100ps)
+ clk_per_100ps = SPX5_CLK_PER_100PS_DEFAULT;
+
+ clk_hz = (10 * 1000 * 1000) / clk_per_100ps;
+	return clk_hz * 1000;
+}
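+
+/* Example (editor's note, register value assumed): HSCH_SYS_CLK_PER holds
+ * the system clock period in 100 ps units. With clk_per_100ps = 16 the
+ * period is 1.6 ns, so (10 * 1000 * 1000) / 16 = 625000 kHz, and the final
+ * multiplication by 1000 returns 625000000 Hz (625 MHz).
+ */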
+
+static int sparx5_sdlb_pup_interval_get(struct sparx5 *sparx5, u32 max_token,
+ u64 max_rate)
+{
+ u64 clk_hz;
+
+ clk_hz = sparx5_sdlb_clk_hz_get(sparx5);
+
+ return div64_u64((8 * clk_hz * max_token), max_rate);
+}
+
+int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval, u64 rate)
+{
+ u64 clk_hz;
+
+ if (!rate)
+ return SPX5_SDLB_PUP_TOKEN_DISABLE;
+
+ clk_hz = sparx5_sdlb_clk_hz_get(sparx5);
+
+ return DIV64_U64_ROUND_UP((rate * pup_interval), (clk_hz * 8));
+}
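+
+/* Worked example (editor's sketch, assuming clk_hz = 625000000): the 1 G
+ * group is initialized with max_token = 8192 / 2 = 4096 (its min_burst,
+ * assumed below SPX5_SDLB_PUP_TOKEN_MAX), so
+ *   pup_interval = (8 * 625000000 * 4096) / 1000000000 = 20480 cycles.
+ * A member LB policing rate = 100000000 bit/s then gets
+ *   pup_token = DIV64_U64_ROUND_UP(100000000 * 20480, 625000000 * 8) = 410
+ * byte tokens added every pup_interval.
+ */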
+
+static void sparx5_sdlb_group_disable(struct sparx5 *sparx5, u32 group)
+{
+ spx5_rmw(ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(0),
+ ANA_AC_SDLB_PUP_CTRL_PUP_ENA, sparx5,
+ ANA_AC_SDLB_PUP_CTRL(group));
+}
+
+static void sparx5_sdlb_group_enable(struct sparx5 *sparx5, u32 group)
+{
+ spx5_rmw(ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(1),
+ ANA_AC_SDLB_PUP_CTRL_PUP_ENA, sparx5,
+ ANA_AC_SDLB_PUP_CTRL(group));
+}
+
+static u32 sparx5_sdlb_group_get_first(struct sparx5 *sparx5, u32 group)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_XLB_START(group));
+
+ return ANA_AC_SDLB_XLB_START_LBSET_START_GET(val);
+}
+
+static u32 sparx5_sdlb_group_get_next(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_XLB_NEXT(lb));
+
+ return ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(val);
+}
+
+static bool sparx5_sdlb_group_is_first(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ return lb == sparx5_sdlb_group_get_first(sparx5, group);
+}
+
+static bool sparx5_sdlb_group_is_last(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ return lb == sparx5_sdlb_group_get_next(sparx5, group, lb);
+}
+
+static bool sparx5_sdlb_group_is_empty(struct sparx5 *sparx5, u32 group)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_PUP_CTRL(group));
+
+ return ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(val) == 0;
+}
+
+static u32 sparx5_sdlb_group_get_last(struct sparx5 *sparx5, u32 group)
+{
+ u32 itr, next;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, group);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+ if (itr == next)
+ return itr;
+
+ itr = next;
+ }
+}
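+
+/* Editor's note on the list convention used by the helpers above and below:
+ * the XLBs of a group form a hardware linked list whose last element points
+ * to itself, e.g. XLB_START -> A -> B -> C -> C. Traversal therefore stops
+ * when XLB_NEXT(itr) == itr, which is how _is_last(), _get_last(),
+ * _get_count() and _get_adjacent() detect the end.
+ */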
+
+static bool sparx5_sdlb_group_is_singular(struct sparx5 *sparx5, u32 group)
+{
+ if (sparx5_sdlb_group_is_empty(sparx5, group))
+ return false;
+
+ return sparx5_sdlb_group_get_first(sparx5, group) ==
+ sparx5_sdlb_group_get_last(sparx5, group);
+}
+
+static int sparx5_sdlb_group_get_adjacent(struct sparx5 *sparx5, u32 group,
+ u32 idx, u32 *prev, u32 *next,
+ u32 *first)
+{
+ u32 itr;
+
+ *first = sparx5_sdlb_group_get_first(sparx5, group);
+ *prev = *first;
+ *next = *first;
+ itr = *first;
+
+ for (;;) {
+ *next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+
+ if (itr == idx)
+ return 0; /* Found it */
+
+ if (itr == *next)
+ return -EINVAL; /* Was not found */
+
+ *prev = itr;
+ itr = *next;
+ }
+}
+
+static int sparx5_sdlb_group_get_count(struct sparx5 *sparx5, u32 group)
+{
+ u32 itr, next;
+ int count = 0;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, group);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+ if (itr == next)
+ return count;
+
+ itr = next;
+ count++;
+ }
+}
+
+int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst)
+{
+ const struct sparx5_sdlb_group *group;
+ u64 rate_bps;
+ int i, count;
+
+	rate_bps = (u64)rate * 1000; /* avoid u32 overflow above ~4.2 Gbit/s */
+
+ for (i = SPX5_SDLB_GROUP_CNT - 1; i >= 0; i--) {
+ group = &sdlb_groups[i];
+
+ count = sparx5_sdlb_group_get_count(sparx5, i);
+
+ /* Check that this group is not full.
+		 * According to the LB group configuration rules, the number
+		 * of XLBs in a group must not exceed PUP_INTERVAL/4 - 1.
+ */
+ if (count > ((group->pup_interval / 4) - 1))
+ continue;
+
+ if (rate_bps < group->max_rate)
+ return i;
+ }
+
+ return -ENOSPC;
+}
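+
+/* Example (editor's illustration): a policer asking for rate = 800000 kbit
+ * (800 Mbit/s) is matched bottom-up; the first group with a free slot whose
+ * max_rate exceeds 800000000 bit/s is the 1 G group, so index 5 of
+ * sdlb_groups[] is returned, assuming that group is not yet full.
+ */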
+
+int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group)
+{
+ u32 itr, next;
+ int i;
+
+ for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
+ if (sparx5_sdlb_group_is_empty(sparx5, i))
+ continue;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, i);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, i, itr);
+
+ if (itr == idx) {
+ *group = i;
+ return 0; /* Found it */
+ }
+ if (itr == next)
+ break; /* Was not found */
+
+ itr = next;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int sparx5_sdlb_group_link(struct sparx5 *sparx5, u32 group, u32 idx,
+ u32 first, u32 next, bool empty)
+{
+ /* Stop leaking */
+ sparx5_sdlb_group_disable(sparx5, group);
+
+ if (empty)
+ return 0;
+
+ /* Link insertion lb to next lb */
+ spx5_wr(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(next) |
+ ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(group),
+ sparx5, ANA_AC_SDLB_XLB_NEXT(idx));
+
+ /* Set the first lb */
+ spx5_wr(ANA_AC_SDLB_XLB_START_LBSET_START_SET(first), sparx5,
+ ANA_AC_SDLB_XLB_START(group));
+
+ /* Start leaking */
+ sparx5_sdlb_group_enable(sparx5, group);
+
+ return 0;
+}
+
+int sparx5_sdlb_group_add(struct sparx5 *sparx5, u32 group, u32 idx)
+{
+ u32 first, next;
+
+	/* We always add to the head of the list */
+ first = idx;
+
+ if (sparx5_sdlb_group_is_empty(sparx5, group))
+ next = idx;
+ else
+ next = sparx5_sdlb_group_get_first(sparx5, group);
+
+ return sparx5_sdlb_group_link(sparx5, group, idx, first, next, false);
+}
+
+int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx)
+{
+ u32 first, next, prev;
+ bool empty = false;
+
+ if (sparx5_sdlb_group_get_adjacent(sparx5, group, idx, &prev, &next,
+ &first) < 0) {
+		pr_err("%s:%d Could not find idx: %d in group: %d\n", __func__,
+		       __LINE__, idx, group);
+ return -EINVAL;
+ }
+
+ if (sparx5_sdlb_group_is_singular(sparx5, group)) {
+ empty = true;
+ } else if (sparx5_sdlb_group_is_last(sparx5, group, idx)) {
+ /* idx is removed, prev is now last */
+ idx = prev;
+ next = prev;
+ } else if (sparx5_sdlb_group_is_first(sparx5, group, idx)) {
+ /* idx is removed and points to itself, first is next */
+ first = next;
+ next = idx;
+ } else {
+ /* Next is not touched */
+ idx = prev;
+ }
+
+ return sparx5_sdlb_group_link(sparx5, group, idx, first, next, empty);
+}
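+
+/* Editor's sketch of the four cases above, for a list first -> A -> B -> C
+ * (C terminates by linking to itself):
+ *   - singular list:     the group is simply disabled (empty = true)
+ *   - idx == C (last):   B is relinked to itself and becomes the terminator
+ *   - idx == A (first):  XLB_START moves to B and A is relinked to itself
+ *   - idx == B (middle): A is relinked straight to C
+ */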
+
+void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
+ u32 frame_size, u32 idx)
+{
+ u32 thres_shift, mask = 0x01, power = 0;
+ struct sparx5_sdlb_group *group;
+ u64 max_token;
+
+ group = &sdlb_groups[idx];
+
+ /* Number of positions to right-shift LB's threshold value. */
+ while ((min_burst & mask) == 0) {
+ power++;
+ mask <<= 1;
+ }
+ thres_shift = SPX5_SDLB_2CYCLES_TYPE2_THRES_OFFSET - power;
+
+ max_token = (min_burst > SPX5_SDLB_PUP_TOKEN_MAX) ?
+ SPX5_SDLB_PUP_TOKEN_MAX :
+ min_burst;
+ group->pup_interval =
+ sparx5_sdlb_pup_interval_get(sparx5, max_token, max_rate);
+
+ group->frame_size = frame_size;
+
+ spx5_wr(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(group->pup_interval),
+ sparx5, ANA_AC_SDLB_PUP_INTERVAL(idx));
+
+ spx5_wr(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(frame_size),
+ sparx5, ANA_AC_SDLB_FRM_RATE_TOKENS(idx));
+
+ spx5_wr(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(thres_shift), sparx5,
+ ANA_AC_SDLB_LBGRP_MISC(idx));
+}
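+
+/* Worked example (editor's note): the 1 G group has min_burst = 4096 = 2^12,
+ * so power = 12 and thres_shift = SPX5_SDLB_2CYCLES_TYPE2_THRES_OFFSET - 12;
+ * each member LB's burst threshold is right-shifted by thres_shift before
+ * being programmed. The value of the OFFSET constant is defined elsewhere
+ * in the driver and is not assumed here.
+ */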
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
index 205246b5af82..e80f3166db7d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -5,6 +5,7 @@
*/
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include "sparx5_tc.h"
#include "sparx5_main.h"
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 59d6ed6f4191..d73668dcc6b6 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -4,11 +4,13 @@
* Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
*/
+#include <net/tc_act/tc_gate.h>
#include <net/tcp.h>
#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#include "vcap_tc.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"
@@ -26,223 +28,8 @@ struct sparx5_multiple_rules {
struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};
-struct sparx5_tc_flower_parse_usage {
- struct flow_cls_offload *fco;
- struct flow_rule *frule;
- struct vcap_rule *vrule;
- struct vcap_admin *admin;
- u16 l3_proto;
- u8 l4_proto;
- unsigned int used_keys;
-};
-
-enum sparx5_is2_arp_opcode {
- SPX5_IS2_ARP_REQUEST,
- SPX5_IS2_ARP_REPLY,
- SPX5_IS2_RARP_REQUEST,
- SPX5_IS2_RARP_REPLY,
-};
-
-enum tc_arp_opcode {
- TC_ARP_OP_RESERVED,
- TC_ARP_OP_REQUEST,
- TC_ARP_OP_REPLY,
-};
-
-static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
- enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
- struct flow_match_eth_addrs match;
- struct vcap_u48_key smac, dmac;
- int err = 0;
-
- flow_rule_match_eth_addrs(st->frule, &match);
-
- if (!is_zero_ether_addr(match.mask->src)) {
- vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
- vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
- if (err)
- goto out;
- }
-
- if (!is_zero_ether_addr(match.mask->dst)) {
- vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
- vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
- return err;
-}
-
static int
-sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- int err = 0;
-
- if (st->l3_proto == ETH_P_IP) {
- struct flow_match_ipv4_addrs mt;
-
- flow_rule_match_ipv4_addrs(st->frule, &mt);
- if (mt.mask->src) {
- err = vcap_rule_add_key_u32(st->vrule,
- VCAP_KF_L3_IP4_SIP,
- be32_to_cpu(mt.key->src),
- be32_to_cpu(mt.mask->src));
- if (err)
- goto out;
- }
- if (mt.mask->dst) {
- err = vcap_rule_add_key_u32(st->vrule,
- VCAP_KF_L3_IP4_DIP,
- be32_to_cpu(mt.key->dst),
- be32_to_cpu(mt.mask->dst));
- if (err)
- goto out;
- }
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- int err = 0;
-
- if (st->l3_proto == ETH_P_IPV6) {
- struct flow_match_ipv6_addrs mt;
- struct vcap_u128_key sip;
- struct vcap_u128_key dip;
-
- flow_rule_match_ipv6_addrs(st->frule, &mt);
- /* Check if address masks are non-zero */
- if (!ipv6_addr_any(&mt.mask->src)) {
- vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
- vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
- err = vcap_rule_add_key_u128(st->vrule,
- VCAP_KF_L3_IP6_SIP, &sip);
- if (err)
- goto out;
- }
- if (!ipv6_addr_any(&mt.mask->dst)) {
- vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
- vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
- err = vcap_rule_add_key_u128(st->vrule,
- VCAP_KF_L3_IP6_DIP, &dip);
- if (err)
- goto out;
- }
- }
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
- return err;
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- struct flow_match_control mt;
- u32 value, mask;
- int err = 0;
-
- flow_rule_match_control(st->frule, &mt);
-
- if (mt.mask->flags) {
- if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
- if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
- value = 1; /* initial fragment */
- mask = 0x3;
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
-
- err = vcap_rule_add_key_u32(st->vrule,
- VCAP_KF_L3_FRAGMENT_TYPE,
- value, mask);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- struct flow_match_ports mt;
- u16 value, mask;
- int err = 0;
-
- flow_rule_match_ports(st->frule, &mt);
-
- if (mt.mask->src) {
- value = be16_to_cpu(mt.key->src);
- mask = be16_to_cpu(mt.mask->src);
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
- mask);
- if (err)
- goto out;
- }
-
- if (mt.mask->dst) {
- value = be16_to_cpu(mt.key->dst);
- mask = be16_to_cpu(mt.mask->dst);
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
- mask);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
{
struct flow_match_basic mt;
int err = 0;
@@ -266,6 +53,13 @@ sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
VCAP_BIT_0);
if (err)
goto out;
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
}
}
@@ -309,265 +103,92 @@ out:
}
static int
-sparx5_tc_flower_handler_cvlan_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0;
- enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0;
- struct flow_match_vlan mt;
- u16 tpid;
- int err;
-
- if (st->admin->vtype != VCAP_TYPE_IS0)
- return -EINVAL;
-
- flow_rule_match_cvlan(st->frule, &mt);
-
- tpid = be16_to_cpu(mt.key->vlan_tpid);
-
- if (tpid == ETH_P_8021Q) {
- vid_key = VCAP_KF_8021Q_VID1;
- pcp_key = VCAP_KF_8021Q_PCP1;
- }
-
- if (mt.mask->vlan_id) {
- err = vcap_rule_add_key_u32(st->vrule, vid_key,
- mt.key->vlan_id,
- mt.mask->vlan_id);
- if (err)
- goto out;
- }
-
- if (mt.mask->vlan_priority) {
- err = vcap_rule_add_key_u32(st->vrule, pcp_key,
- mt.key->vlan_priority,
- mt.mask->vlan_priority);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
-
- return 0;
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
- enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
- struct flow_match_vlan mt;
- int err;
-
- flow_rule_match_vlan(st->frule, &mt);
-
- if (st->admin->vtype == VCAP_TYPE_IS0) {
- vid_key = VCAP_KF_8021Q_VID0;
- pcp_key = VCAP_KF_8021Q_PCP0;
- }
-
- if (mt.mask->vlan_id) {
- err = vcap_rule_add_key_u32(st->vrule, vid_key,
- mt.key->vlan_id,
- mt.mask->vlan_id);
- if (err)
- goto out;
- }
-
- if (mt.mask->vlan_priority) {
- err = vcap_rule_add_key_u32(st->vrule, pcp_key,
- mt.key->vlan_priority,
- mt.mask->vlan_priority);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
-
- return 0;
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
- struct flow_match_tcp mt;
- u16 tcp_flags_mask;
- u16 tcp_flags_key;
- enum vcap_bit val;
+ struct flow_match_control mt;
+ u32 value, mask;
int err = 0;
- flow_rule_match_tcp(st->frule, &mt);
- tcp_flags_key = be16_to_cpu(mt.key->flags);
- tcp_flags_mask = be16_to_cpu(mt.mask->flags);
-
- if (tcp_flags_mask & TCPHDR_FIN) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_FIN)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
- if (err)
- goto out;
- }
-
- if (tcp_flags_mask & TCPHDR_SYN) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_SYN)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
- if (err)
- goto out;
- }
-
- if (tcp_flags_mask & TCPHDR_RST) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_RST)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
- if (err)
- goto out;
- }
-
- if (tcp_flags_mask & TCPHDR_PSH) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_PSH)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
- if (err)
- goto out;
- }
+ flow_rule_match_control(st->frule, &mt);
- if (tcp_flags_mask & TCPHDR_ACK) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_ACK)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
- if (err)
- goto out;
- }
+ if (mt.mask->flags) {
+ if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
+ value = 1; /* initial fragment */
+ mask = 0x3;
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
- if (tcp_flags_mask & TCPHDR_URG) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_URG)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ value, mask);
if (err)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
return err;
out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
return err;
}
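+
+/* Editor's summary of the VCAP_KF_L3_FRAGMENT_TYPE encoding built above
+ * (the mask is always 0x3):
+ *   0 - not fragmented
+ *   1 - initial fragment
+ *   3 - follow-up (non-initial) fragment
+ */
+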
static int
-sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
{
- struct flow_match_arp mt;
- u16 value, mask;
- u32 ipval, ipmsk;
- int err;
-
- flow_rule_match_arp(st->frule, &mt);
-
- if (mt.mask->op) {
- mask = 0x3;
- if (st->l3_proto == ETH_P_ARP) {
- value = mt.key->op == TC_ARP_OP_REQUEST ?
- SPX5_IS2_ARP_REQUEST :
- SPX5_IS2_ARP_REPLY;
- } else { /* RARP */
- value = mt.key->op == TC_ARP_OP_REQUEST ?
- SPX5_IS2_RARP_REQUEST :
- SPX5_IS2_RARP_REPLY;
- }
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
- value, mask);
- if (err)
- goto out;
- }
-
- /* The IS2 ARP keyset does not support ARP hardware addresses */
- if (!is_zero_ether_addr(mt.mask->sha) ||
- !is_zero_ether_addr(mt.mask->tha)) {
- err = -EINVAL;
- goto out;
- }
-
- if (mt.mask->sip) {
- ipval = be32_to_cpu((__force __be32)mt.key->sip);
- ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);
-
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
- ipval, ipmsk);
- if (err)
- goto out;
- }
-
- if (mt.mask->tip) {
- ipval = be32_to_cpu((__force __be32)mt.key->tip);
- ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);
-
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
- ipval, ipmsk);
- if (err)
- goto out;
+ if (st->admin->vtype != VCAP_TYPE_IS0) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "cvlan not supported in this VCAP");
+ return -EINVAL;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);
-
- return 0;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
- return err;
+ return vcap_tc_flower_handler_cvlan_usage(st);
}
static int
-sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
{
- struct flow_match_ip mt;
- int err = 0;
-
- flow_rule_match_ip(st->frule, &mt);
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
- if (mt.mask->tos) {
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
- mt.key->tos,
- mt.mask->tos);
- if (err)
- goto out;
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ vid_key = VCAP_KF_8021Q_VID0;
+ pcp_key = VCAP_KF_8021Q_PCP0;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
- return err;
+ return vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);
}
-static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
- [FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
- [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
- [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
+static int (*sparx5_tc_flower_usage_handlers[])(struct vcap_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
- [FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
[FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
- [FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
- [FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
- [FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
};
static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
@@ -575,7 +196,7 @@ static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
struct vcap_rule *vrule,
u16 *l3_proto)
{
- struct sparx5_tc_flower_parse_usage state = {
+ struct vcap_tc_flower_parse_usage state = {
.fco = fco,
.vrule = vrule,
.l3_proto = ETH_P_ALL,
@@ -607,7 +228,8 @@ static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
struct net_device *ndev,
- struct flow_cls_offload *fco)
+ struct flow_cls_offload *fco,
+ bool ingress)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
struct flow_action_entry *actent, *last_actent = NULL;
@@ -644,7 +266,8 @@ static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
"Invalid goto chain");
return -EINVAL;
}
- } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index)) {
+ } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
+ ingress)) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Last action must be 'goto'");
return -EINVAL;
@@ -667,7 +290,7 @@ static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
{
int err;
- if (admin->vtype == VCAP_TYPE_IS2) {
+ if (admin->vtype == VCAP_TYPE_IS2 || admin->vtype == VCAP_TYPE_ES2) {
err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
vrule->id);
if (err)
@@ -870,6 +493,9 @@ static int sparx5_tc_set_actionset(struct vcap_admin *admin,
case VCAP_TYPE_IS2:
aset = VCAP_AFS_BASE_TYPE;
break;
+ case VCAP_TYPE_ES2:
+ aset = VCAP_AFS_BASE_TYPE;
+ break;
default:
return -EINVAL;
}
@@ -906,6 +532,10 @@ static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
link_val, /* target */
~0);
+ case VCAP_TYPE_ES2:
+ /* Add ISDX key for chaining rules from IS0 */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS, link_val,
+ ~0);
default:
break;
}
@@ -948,6 +578,18 @@ static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
0xff);
if (err)
goto out;
+ } else if (admin->vtype == VCAP_TYPE_IS0 &&
+ to_admin->vtype == VCAP_TYPE_ES2) {
+ /* Between IS0 and ES2 the ISDX value is used */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL,
+ diff);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
} else {
pr_err("%s:%d: unsupported chain destination: %d\n",
__func__, __LINE__, to_cid);
@@ -957,22 +599,160 @@ out:
return err;
}
+static int sparx5_tc_flower_parse_act_gate(struct sparx5_psfp_sg *sg,
+ struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ if (act->gate.prio < -1 || act->gate.prio > SPX5_PSFP_SG_MAX_IPV) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate priority");
+ return -EINVAL;
+ }
+
+ if (act->gate.cycletime < SPX5_PSFP_SG_MIN_CYCLE_TIME_NS ||
+ act->gate.cycletime > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletime");
+ return -EINVAL;
+ }
+
+ if (act->gate.cycletimeext > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletimeext");
+ return -EINVAL;
+ }
+
+ if (act->gate.num_entries >= SPX5_PSFP_GCE_CNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid number of gate entries");
+ return -EINVAL;
+ }
+
+ sg->gate_state = true;
+ sg->ipv = act->gate.prio;
+ sg->num_entries = act->gate.num_entries;
+ sg->cycletime = act->gate.cycletime;
+ sg->cycletimeext = act->gate.cycletimeext;
+
+ for (i = 0; i < sg->num_entries; i++) {
+ sg->gce[i].gate_state = !!act->gate.entries[i].gate_state;
+ sg->gce[i].interval = act->gate.entries[i].interval;
+ sg->gce[i].ipv = act->gate.entries[i].ipv;
+ sg->gce[i].maxoctets = act->gate.entries[i].maxoctets;
+ }
+
+ return 0;
+}
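+
+/* Editor's usage sketch (assumed command line, not from the original patch):
+ * the action parsed above corresponds to a tc(8) gate action such as
+ *
+ *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
+ *       dst_ip 10.0.0.1 action gate clockid CLOCK_TAI base-time 0 \
+ *       sched-entry open 300000 2 -1 sched-entry close 200000 -1 -1
+ *
+ * where each sched-entry maps to one gce[] element: gate state, interval in
+ * ns, internal priority value (ipv) and max-octets, with -1 meaning unused.
+ */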
+
+static int sparx5_tc_flower_parse_act_police(struct sparx5_policer *pol,
+ struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ pol->type = SPX5_POL_SERVICE;
+ pol->rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
+ pol->burst = act->police.burst;
+ pol->idx = act->hw_index;
+
+ /* rate is now in kbit */
+ if (pol->rate > DIV_ROUND_UP(SPX5_SDLB_GROUP_RATE_MAX, 1000)) {
+ NL_SET_ERR_MSG_MOD(extack, "Maximum rate exceeded");
+ return -EINVAL;
+ }
+
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
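+
+/* Worked example (editor's note): tc "police rate 1Gbit" arrives as
+ * rate_bytes_ps = 125000000, giving pol->rate = 125000000 / 1000 * 8 =
+ * 1000000 kbit/s. The cap is DIV_ROUND_UP(SPX5_SDLB_GROUP_RATE_MAX, 1000),
+ * i.e. the 25 G top entry of sdlb_groups[] expressed in kbit/s.
+ */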
+
+static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
+ struct vcap_rule *vrule, int sg_idx,
+ int pol_idx, struct sparx5_psfp_sg *sg,
+ struct sparx5_psfp_fm *fm,
+ struct sparx5_psfp_sf *sf)
+{
+ u32 psfp_sfid = 0, psfp_fmid = 0, psfp_sgid = 0;
+ int ret;
+
+ /* Must always have a stream gate - max sdu (filter option) is evaluated
+ * after frames have passed the gate, so in case of only a policer, we
+ * allocate a stream gate that is always open.
+ */
+ if (sg_idx < 0) {
+ sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN);
+ sg->ipv = 0; /* Disabled */
+ sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
+ sg->num_entries = 1;
+ sg->gate_state = 1; /* Open */
+ sg->gate_enabled = 1;
+ sg->gce[0].gate_state = 1;
+ sg->gce[0].interval = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
+ sg->gce[0].ipv = 0;
+ sg->gce[0].maxoctets = 0; /* Disabled */
+ }
+
+ ret = sparx5_psfp_sg_add(sparx5, sg_idx, sg, &psfp_sgid);
+ if (ret < 0)
+ return ret;
+
+ if (pol_idx >= 0) {
+ /* Add new flow-meter */
+ ret = sparx5_psfp_fm_add(sparx5, pol_idx, fm, &psfp_fmid);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Map stream filter to stream gate */
+ sf->sgid = psfp_sgid;
+
+	/* Add a new stream filter and map it to a stream gate */
+ ret = sparx5_psfp_sf_add(sparx5, sf, &psfp_sfid);
+ if (ret < 0)
+ return ret;
+
+ /* Streams are classified by ISDX - map ISDX 1:1 to sfid for now. */
+ sparx5_isdx_conf_set(sparx5, psfp_sfid, psfp_sfid, psfp_fmid);
+
+ ret = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_BIT_1);
+ if (ret)
+ return ret;
+
+ ret = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL, psfp_sfid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
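+
+/* Editor's sketch of the PSFP chain set up above:
+ *
+ *   VCAP rule --(ISDX_VAL)--> ISDX --> SFID --> SGID (stream gate)
+ *                               |         \--> max SDU filter
+ *                               \--> FMID (flow meter / policer)
+ *
+ * Because ISDX is mapped 1:1 to the SFID, teardown can recover sfid, fmid
+ * and sgid from the rule's ISDX_VAL action alone (see
+ * sparx5_tc_free_psfp_resources()).
+ */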
+
static int sparx5_tc_flower_replace(struct net_device *ndev,
struct flow_cls_offload *fco,
- struct vcap_admin *admin)
+ struct vcap_admin *admin,
+ bool ingress)
{
+ struct sparx5_psfp_sf sf = { .max_sdu = SPX5_PSFP_SF_MAX_SDU };
+ struct netlink_ext_ack *extack = fco->common.extack;
+ int err, idx, tc_sg_idx = -1, tc_pol_idx = -1;
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5_multiple_rules multi = {};
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_psfp_sg sg = { 0 };
+ struct sparx5_psfp_fm fm = { 0 };
struct flow_action_entry *act;
struct vcap_control *vctrl;
struct flow_rule *frule;
struct vcap_rule *vrule;
u16 l3_proto;
- int err, idx;
vctrl = port->sparx5->vcap_ctrl;
- err = sparx5_tc_flower_action_check(vctrl, ndev, fco);
+ err = sparx5_tc_flower_action_check(vctrl, ndev, fco, ingress);
if (err)
return err;
@@ -1000,8 +780,29 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
frule = flow_cls_offload_flow_rule(fco);
flow_action_for_each(idx, act, &frule->action) {
switch (act->id) {
+ case FLOW_ACTION_GATE: {
+ err = sparx5_tc_flower_parse_act_gate(&sg, act, extack);
+ if (err < 0)
+ goto out;
+
+ tc_sg_idx = act->hw_index;
+
+ break;
+ }
+ case FLOW_ACTION_POLICE: {
+ err = sparx5_tc_flower_parse_act_police(&fm.pol, act,
+ extack);
+ if (err < 0)
+ goto out;
+
+ tc_pol_idx = fm.pol.idx;
+ sf.max_sdu = act->police.mtu;
+
+ break;
+ }
case FLOW_ACTION_TRAP:
- if (admin->vtype != VCAP_TYPE_IS2) {
+ if (admin->vtype != VCAP_TYPE_IS2 &&
+ admin->vtype != VCAP_TYPE_ES2) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Trap action not supported in this VCAP");
err = -EOPNOTSUPP;
@@ -1016,8 +817,11 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
VCAP_AF_CPU_QUEUE_NUM, 0);
if (err)
goto out;
- err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
- SPX5_PMM_REPLACE_ALL);
+ if (admin->vtype != VCAP_TYPE_IS2)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_MASK_MODE,
+ SPX5_PMM_REPLACE_ALL);
if (err)
goto out;
break;
@@ -1042,6 +846,14 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
}
}
+ /* Setup PSFP */
+ if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
+ err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
+ tc_pol_idx, &sg, &fm, &sf);
+ if (err)
+ goto out;
+ }
+
err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
&multi);
if (err) {
@@ -1070,19 +882,86 @@ out:
return err;
}
+static void sparx5_tc_free_psfp_resources(struct sparx5 *sparx5,
+ struct vcap_rule *vrule)
+{
+ struct vcap_client_actionfield *afield;
+ u32 isdx, sfid, sgid, fmid;
+
+ /* Check if VCAP_AF_ISDX_VAL action is set for this rule - and if
+ * it is used for stream and/or flow-meter classification.
+ */
+ afield = vcap_find_actionfield(vrule, VCAP_AF_ISDX_VAL);
+ if (!afield)
+ return;
+
+ isdx = afield->data.u32.value;
+ sfid = sparx5_psfp_isdx_get_sf(sparx5, isdx);
+
+ if (!sfid)
+ return;
+
+ fmid = sparx5_psfp_isdx_get_fm(sparx5, isdx);
+ sgid = sparx5_psfp_sf_get_sg(sparx5, sfid);
+
+	if (fmid && sparx5_psfp_fm_del(sparx5, fmid) < 0)
+		pr_err("%s:%d Could not delete fmid: %d\n", __func__,
+		       __LINE__, fmid);
+
+	if (sgid && sparx5_psfp_sg_del(sparx5, sgid) < 0)
+		pr_err("%s:%d Could not delete sgid: %d\n", __func__,
+		       __LINE__, sgid);
+
+	if (sparx5_psfp_sf_del(sparx5, sfid) < 0)
+		pr_err("%s:%d Could not delete sfid: %d\n", __func__,
+		       __LINE__, sfid);
+
+ sparx5_isdx_conf_set(sparx5, isdx, 0, 0);
+}
+
+static int sparx5_tc_free_rule_resources(struct net_device *ndev,
+ struct vcap_control *vctrl,
+ int rule_id)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct vcap_rule *vrule;
+ int ret = 0;
+
+ vrule = vcap_get_rule(vctrl, rule_id);
+ if (!vrule || IS_ERR(vrule))
+ return -EINVAL;
+
+ sparx5_tc_free_psfp_resources(sparx5, vrule);
+
+ vcap_free_rule(vrule);
+ return ret;
+}
+
static int sparx5_tc_flower_destroy(struct net_device *ndev,
struct flow_cls_offload *fco,
struct vcap_admin *admin)
{
struct sparx5_port *port = netdev_priv(ndev);
+ int err = -ENOENT, count = 0, rule_id;
struct vcap_control *vctrl;
- int err = -ENOENT, rule_id;
vctrl = port->sparx5->vcap_ctrl;
while (true) {
rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
if (rule_id <= 0)
break;
+ if (count == 0) {
+ /* Resources are attached to the first rule of
+ * a set of rules. Only works if the rules are
+ * in the correct order.
+ */
+ err = sparx5_tc_free_rule_resources(ndev, vctrl,
+ rule_id);
+ if (err)
+ pr_err("%s:%d: could not free resources %d\n",
+ __func__, __LINE__, rule_id);
+ }
err = vcap_del_rule(vctrl, ndev, rule_id);
if (err) {
pr_err("%s:%d: could not delete rule %d\n",
@@ -1130,7 +1009,7 @@ int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
switch (fco->command) {
case FLOW_CLS_REPLACE:
- return sparx5_tc_flower_replace(ndev, fco, admin);
+ return sparx5_tc_flower_replace(ndev, fco, admin, ingress);
case FLOW_CLS_DESTROY:
return sparx5_tc_flower_destroy(ndev, fco, admin);
case FLOW_CLS_STATS:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
index 41e50743f3ac..561001ee0516 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
* Microchip VCAP API
*/
-/* This file is autogenerated by cml-utils 2022-12-06 12:43:54 +0100.
- * Commit ID: 3db2ac730f134c160496f2b9f10915e347d871cb
+/* This file is autogenerated by cml-utils 2023-01-17 16:55:38 +0100.
+ * Commit ID: cc027a9bd71002aebf074df5ad8584fe1545e05e
*/
#include <linux/types.h>
@@ -1333,6 +1333,909 @@ static const struct vcap_field is2_ip_7tuple_keyfield[] = {
},
};
+static const struct vcap_field es2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 99,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 147,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 195,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 196,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 212,
+ .width = 64,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 276,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 277,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 98,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 148,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 154,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 177,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 178,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 210,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 226,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 227,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 232,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 233,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 234,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 185,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field es2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 74,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 75,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 96,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 193,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 202,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 330,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 458,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 459,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 460,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 461,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 477,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 493,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 509,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 510,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 511,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 512,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 513,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 514,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 515,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 516,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 517,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 100,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 229,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 237,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 253,
+ .width = 40,
+ },
+};
+
/* keyfield_set */
static const struct vcap_set is0_keyfield_set[] = {
[VCAP_KFS_NORMAL_7TUPLE] = {
@@ -1380,6 +2283,39 @@ static const struct vcap_set is2_keyfield_set[] = {
},
};
+static const struct vcap_set es2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 1,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = -1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
/* keyfield_set map */
static const struct vcap_field *is0_keyfield_set_map[] = {
[VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
@@ -1395,6 +2331,15 @@ static const struct vcap_field *is2_keyfield_set_map[] = {
[VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
};
+static const struct vcap_field *es2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = es2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = es2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_STD] = es2_ip6_std_keyfield,
+};
+
/* keyfield_set map sizes */
static int is0_keyfield_set_map_size[] = {
[VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
@@ -1410,6 +2355,15 @@ static int is2_keyfield_set_map_size[] = {
[VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
};
+static int es2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(es2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(es2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(es2_ip6_std_keyfield),
+};
+
/* actionfields */
static const struct vcap_field is0_classification_actionfield[] = {
[VCAP_AF_TYPE] = {
@@ -1798,6 +2752,79 @@ static const struct vcap_field is2_base_type_actionfield[] = {
},
};
+static const struct vcap_field es2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_AF_COPY_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 16,
+ },
+ [VCAP_AF_COPY_PORT_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 7,
+ },
+ [VCAP_AF_MIRROR_PROBE_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 3,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 33,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_REMARK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 34,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 35,
+ .width = 6,
+ },
+ [VCAP_AF_ES2_REW_CMD] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 11,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+};
+
/* actionfield_set */
static const struct vcap_set is0_actionfield_set[] = {
[VCAP_AFS_CLASSIFICATION] = {
@@ -1825,6 +2852,14 @@ static const struct vcap_set is2_actionfield_set[] = {
},
};
+static const struct vcap_set es2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
/* actionfield_set map */
static const struct vcap_field *is0_actionfield_set_map[] = {
[VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
@@ -1836,6 +2871,10 @@ static const struct vcap_field *is2_actionfield_set_map[] = {
[VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
};
+static const struct vcap_field *es2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = es2_base_type_actionfield,
+};
+
/* actionfield_set map size */
static int is0_actionfield_set_map_size[] = {
[VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
@@ -1847,6 +2886,10 @@ static int is2_actionfield_set_map_size[] = {
[VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
};
+static int es2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(es2_base_type_actionfield),
+};
+
/* Type Groups */
static const struct vcap_typegroup is0_x12_keyfield_set_typegroups[] = {
{
@@ -2004,6 +3047,52 @@ static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
{}
};
+static const struct vcap_typegroup es2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
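The ES2 typegroup offsets above follow directly from the geometry: with the 52-bit subword width declared for ES2 in the sparx5_vcaps table further down, every typegroup marker lands on a 3-subword boundary (3 * 52 = 156 bits), giving 0, 156, 312, 468 for X12 and 0, 156 for X6. A standalone arithmetic check, not part of the patch:

    #include <assert.h>

    int main(void)
    {
        const int sw_width = 52;  /* ES2 subword width, from sparx5_vcaps */
        const int x12_offsets[] = { 0, 156, 312, 468 };  /* per the table above */
        const int x6_offsets[] = { 0, 156 };

        for (int i = 0; i < 4; i++)
            assert(x12_offsets[i] == i * 3 * sw_width);
        for (int i = 0; i < 2; i++)
            assert(x6_offsets[i] == i * 3 * sw_width);
        return 0;
    }
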
static const struct vcap_typegroup *is0_keyfield_set_typegroups[] = {
[12] = is0_x12_keyfield_set_typegroups,
[6] = is0_x6_keyfield_set_typegroups,
@@ -2021,6 +3110,14 @@ static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
[13] = NULL,
};
+static const struct vcap_typegroup *es2_keyfield_set_typegroups[] = {
+ [12] = es2_x12_keyfield_set_typegroups,
+ [6] = es2_x6_keyfield_set_typegroups,
+ [3] = es2_x3_keyfield_set_typegroups,
+ [1] = es2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
static const struct vcap_typegroup is0_x3_actionfield_set_typegroups[] = {
{
.offset = 0,
@@ -2086,6 +3183,29 @@ static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
{}
};
+static const struct vcap_typegroup es2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 21,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 42,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
static const struct vcap_typegroup *is0_actionfield_set_typegroups[] = {
[3] = is0_x3_actionfield_set_typegroups,
[2] = is0_x2_actionfield_set_typegroups,
@@ -2099,6 +3219,12 @@ static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
[13] = NULL,
};
+static const struct vcap_typegroup *es2_actionfield_set_typegroups[] = {
+ [3] = es2_x3_actionfield_set_typegroups,
+ [1] = es2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
/* Keyfieldset names */
static const char * const vcap_keyfield_set_names[] = {
[VCAP_KFS_NO_VALUE] = "(None)",
@@ -2156,14 +3282,18 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
[VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
[VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
[VCAP_KF_ETYPE] = "ETYPE",
[VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
[VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
[VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
[VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
[VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
[VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
[VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
[VCAP_KF_IP4_IS] = "IP4_IS",
[VCAP_KF_IP_MC_IS] = "IP_MC_IS",
[VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
@@ -2183,6 +3313,7 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_L2_SMAC] = "L2_SMAC",
[VCAP_KF_L2_SNAP] = "L2_SNAP",
[VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
[VCAP_KF_L3_DSCP] = "L3_DSCP",
[VCAP_KF_L3_DST_IS] = "L3_DST_IS",
[VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
@@ -2236,6 +3367,8 @@ static const char * const vcap_actionfield_names[] = {
[VCAP_AF_ACL_ID] = "ACL_ID",
[VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
[VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
[VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
[VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
[VCAP_AF_DEI_ENA] = "DEI_ENA",
@@ -2244,7 +3377,9 @@ static const char * const vcap_actionfield_names[] = {
[VCAP_AF_DP_VAL] = "DP_VAL",
[VCAP_AF_DSCP_ENA] = "DSCP_ENA",
[VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
[VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
[VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
[VCAP_AF_HOST_MATCH] = "HOST_MATCH",
[VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
@@ -2261,6 +3396,7 @@ static const char * const vcap_actionfield_names[] = {
[VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
[VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
[VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
[VCAP_AF_NXT_IDX] = "NXT_IDX",
[VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
[VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
@@ -2271,6 +3407,7 @@ static const char * const vcap_actionfield_names[] = {
[VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
[VCAP_AF_POLICE_ENA] = "POLICE_ENA",
[VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
[VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
[VCAP_AF_PORT_MASK] = "PORT_MASK",
[VCAP_AF_QOS_ENA] = "QOS_ENA",
@@ -2325,11 +3462,32 @@ const struct vcap_info sparx5_vcaps[] = {
.keyfield_set_typegroups = is2_keyfield_set_typegroups,
.actionfield_set_typegroups = is2_actionfield_set_typegroups,
},
+ [VCAP_TYPE_ES2] = {
+ .name = "es2",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 21,
+ .default_cnt = 74,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es2_keyfield_set),
+ .actionfield_set = es2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es2_actionfield_set),
+ .keyfield_set_map = es2_keyfield_set_map,
+ .keyfield_set_map_size = es2_keyfield_set_map_size,
+ .actionfield_set_map = es2_actionfield_set_map,
+ .actionfield_set_map_size = es2_actionfield_set_map_size,
+ .keyfield_set_typegroups = es2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es2_actionfield_set_typegroups,
+ },
};
const struct vcap_statistics sparx5_vcap_stats = {
.name = "sparx5",
- .count = 2,
+ .count = 3,
.keyfield_set_names = vcap_keyfield_set_names,
.actionfield_set_names = vcap_actionfield_set_names,
.keyfield_names = vcap_keyfield_names,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
index 58f86dfa54bb..f3b2e58af212 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
@@ -284,6 +284,119 @@ static void sparx5_vcap_is2_port_stickies(struct sparx5 *sparx5,
out->prf(out->dst, "\n");
}
+static void sparx5_vcap_es2_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5, EACL_VCAP_ES2_KEY_SEL(port->portno,
+ lookup));
+ out->prf(out->dst, "\n state: ");
+ if (EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ out->prf(out->dst, "\n arp: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_ARP_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_ARP_ARP:
+ out->prf(out->dst, "arp");
+ break;
+ }
+ out->prf(out->dst, "\n ipv4: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV4_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_IPV4_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID:
+ out->prf(out->dst, "ip4_tcp_udp ip4_vid");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_VID:
+ out->prf(out->dst, "ip4_vid");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_OTHER:
+ out->prf(out->dst, "ip4_other");
+ break;
+ }
+ out->prf(out->dst, "\n ipv6: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV6_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_VID:
+ out->prf(out->dst, "ip_7tuple ip6_vid");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_STD:
+ out->prf(out->dst, "ip_7tuple ip6_std");
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_VID:
+ out->prf(out->dst, "ip6_vid");
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_STD:
+ out->prf(out->dst, "ip6_std");
+ break;
+ case VCAP_ES2_PS_IPV6_IP4_DOWNGRADE:
+ out->prf(out->dst, "ip4_downgrade");
+ break;
+ }
+ }
+ out->prf(out->dst, "\n");
+}
+
+static void sparx5_vcap_es2_port_stickies(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " Sticky bits: ");
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ value = spx5_rd(sparx5, EACL_SEC_LOOKUP_STICKY(lookup));
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(value))
+ out->prf(out->dst, " ip_7tuple");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip6_vid");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(value))
+ out->prf(out->dst, " ip6_std");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(value))
+ out->prf(out->dst, " ip4_tcp_udp");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip4_vid");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(value))
+ out->prf(out->dst, " ip4_other");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(value))
+ out->prf(out->dst, " arp");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(value))
+ out->prf(out->dst, " mac_etype");
+ /* Clear stickies */
+ spx5_wr(value, sparx5, EACL_SEC_LOOKUP_STICKY(lookup));
+ }
+ out->prf(out->dst, "\n");
+}
+
/* Provide port information via a callback interface */
int sparx5_port_info(struct net_device *ndev,
struct vcap_admin *admin,
@@ -305,6 +418,10 @@ int sparx5_port_info(struct net_device *ndev,
sparx5_vcap_is2_port_keys(sparx5, admin, port, out);
sparx5_vcap_is2_port_stickies(sparx5, admin, out);
break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_port_keys(sparx5, admin, port, out);
+ sparx5_vcap_es2_port_stickies(sparx5, admin, out);
+ break;
default:
out->prf(out->dst, " no info\n");
break;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
index 92073bfddc99..cadc4926d550 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -37,6 +37,13 @@
ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_SET(_mpls_mc) | \
ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_SET(_mlbs))
+#define SPARX5_ES2_LOOKUPS 2
+#define VCAP_ES2_KEYSEL(_ena, _arp, _ipv4, _ipv6) \
+ (EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(_ena) | \
+ EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(_arp) | \
+ EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(_ipv4) | \
+ EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(_ipv6))
+
static struct sparx5_vcap_inst {
enum vcap_type vtype; /* type of vcap */
int vinst; /* instance number within the same type */
@@ -48,6 +55,7 @@ static struct sparx5_vcap_inst {
int map_id; /* id in the super vcap block mapping (if applicable) */
int blockno; /* starting block in super vcap (if applicable) */
int blocks; /* number of blocks in super vcap (if applicable) */
+ bool ingress; /* is vcap in the ingress path */
} sparx5_vcap_inst_cfg[] = {
{
.vtype = VCAP_TYPE_IS0, /* CLM-0 */
@@ -59,6 +67,7 @@ static struct sparx5_vcap_inst {
.last_cid = SPARX5_VCAP_CID_IS0_L2 - 1,
.blockno = 8, /* Maps block 8-9 */
.blocks = 2,
+ .ingress = true,
},
{
.vtype = VCAP_TYPE_IS0, /* CLM-1 */
@@ -70,6 +79,7 @@ static struct sparx5_vcap_inst {
.last_cid = SPARX5_VCAP_CID_IS0_L4 - 1,
.blockno = 6, /* Maps block 6-7 */
.blocks = 2,
+ .ingress = true,
},
{
.vtype = VCAP_TYPE_IS0, /* CLM-2 */
@@ -81,6 +91,7 @@ static struct sparx5_vcap_inst {
.last_cid = SPARX5_VCAP_CID_IS0_MAX,
.blockno = 4, /* Maps block 4-5 */
.blocks = 2,
+ .ingress = true,
},
{
.vtype = VCAP_TYPE_IS2, /* IS2-0 */
@@ -92,6 +103,7 @@ static struct sparx5_vcap_inst {
.last_cid = SPARX5_VCAP_CID_IS2_L2 - 1,
.blockno = 0, /* Maps block 0-1 */
.blocks = 2,
+ .ingress = true,
},
{
.vtype = VCAP_TYPE_IS2, /* IS2-1 */
@@ -103,6 +115,16 @@ static struct sparx5_vcap_inst {
.last_cid = SPARX5_VCAP_CID_IS2_MAX,
.blockno = 2, /* Maps block 2-3 */
.blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_ES2,
+ .lookups = SPARX5_ES2_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES2_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES2_L0,
+ .last_cid = SPARX5_VCAP_CID_ES2_MAX,
+ .count = 12288, /* Addresses according to datasheet */
+ .ingress = false,
},
};
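The ES2 .count above cross-checks against the model: 12288 addresses at 12 subwords per row is exactly the 1024 rows declared in the VCAP_TYPE_ES2 entry of sparx5_vcaps earlier in this patch. A standalone consistency check, not part of the patch:

    #include <assert.h>

    int main(void)
    {
        const int es2_count = 12288;   /* .count in the instance table above */
        const int es2_sw_count = 12;   /* .sw_count in the ES2 model entry */
        const int es2_rows = 1024;     /* .rows in the ES2 model entry */

        assert(es2_count == es2_rows * es2_sw_count);
        return 0;
    }
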
@@ -121,6 +143,14 @@ static u16 sparx5_vcap_is2_known_etypes[] = {
ETH_P_IPV6,
};
+/* These protocols have dedicated keysets in ES2 and a TC dissector */
+static u16 sparx5_vcap_es2_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_ARP,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
static void sparx5_vcap_type_err(struct sparx5 *sparx5,
struct vcap_admin *admin,
const char *fname)
@@ -139,25 +169,57 @@ static void sparx5_vcap_wait_super_update(struct sparx5 *sparx5)
false, sparx5, VCAP_SUPER_CTRL);
}
-/* Initializing a VCAP address range: IS0 and IS2 for now */
+/* Await the ES2 VCAP completion of the current operation */
+static void sparx5_vcap_wait_es2_update(struct sparx5 *sparx5)
+{
+ u32 value;
+
+ read_poll_timeout(spx5_rd, value,
+ !VCAP_ES2_CTRL_UPDATE_SHOT_GET(value), 500, 10000,
+ false, sparx5, VCAP_ES2_CTRL);
+}
+
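The read_poll_timeout() call above re-reads VCAP_ES2_CTRL every 500 us and gives up after 10 ms if UPDATE_SHOT has not self-cleared. A rough userspace paraphrase of that loop, not part of the patch; reg_read()/sleep_us() are hypothetical stand-ins for spx5_rd() and the kernel delay helpers, and the UPDATE_SHOT bit position is assumed:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static unsigned int polls;

    static uint32_t reg_read(void)
    {
        /* pretend the hardware clears UPDATE_SHOT after three reads */
        return ++polls < 3 ? 1u : 0u;
    }

    static void sleep_us(unsigned int us) { (void)us; }

    static bool wait_update_shot_clear(void)
    {
        unsigned int elapsed_us = 0;

        for (;;) {
            uint32_t value = reg_read();

            if (!(value & 1u))          /* UPDATE_SHOT bit position assumed */
                return true;            /* hardware finished the operation */
            if (elapsed_us >= 10000)    /* 10 ms timeout, as in the call above */
                return false;
            sleep_us(500);              /* 500 us interval, as in the call above */
            elapsed_us += 500;
        }
    }

    int main(void)
    {
        printf("completed: %d\n", wait_update_shot_clear());  /* completed: 1 */
        return 0;
    }
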
+/* Initializing a VCAP address range */
static void _sparx5_vcap_range_init(struct sparx5 *sparx5,
struct vcap_admin *admin,
u32 addr, u32 count)
{
u32 size = count - 1;
- spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
- VCAP_SUPER_CFG_MV_SIZE_SET(size),
- sparx5, VCAP_SUPER_CFG);
- spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
- VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
- VCAP_SUPER_CTRL_CLEAR_CACHE_SET(true) |
- VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
- sparx5, VCAP_SUPER_CTRL);
- sparx5_vcap_wait_super_update(sparx5);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
+ VCAP_SUPER_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_SUPER_CFG);
+ spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_SUPER_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_SUPER_CTRL);
+ sparx5_vcap_wait_super_update(sparx5);
+ break;
+ case VCAP_TYPE_ES2:
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES2_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
/* Initializing VCAP rule data area */
@@ -198,9 +260,15 @@ static bool sparx5_vcap_is2_is_first_chain(struct vcap_rule *rule)
rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L3));
}
+static bool sparx5_vcap_es2_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= SPARX5_VCAP_CID_ES2_L0 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_ES2_L1);
+}
+
/* Set the narrow range ingress port mask on a rule */
-static void sparx5_vcap_add_range_port_mask(struct vcap_rule *rule,
- struct net_device *ndev)
+static void sparx5_vcap_add_ingress_range_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
{
struct sparx5_port *port = netdev_priv(ndev);
u32 port_mask;
@@ -230,6 +298,27 @@ static void sparx5_vcap_add_wide_port_mask(struct vcap_rule *rule,
vcap_rule_add_key_u72(rule, VCAP_KF_IF_IGR_PORT_MASK, &port_mask);
}
+static void sparx5_vcap_add_egress_range_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 port_mask;
+ u32 range;
+
+ /* Mask range selects:
+ * 0-2: Physical/Logical egress port number 0-31, 32-63, 64.
+ * 3-5: Virtual Interface Number 0-31, 32-63, 64.
+ * 6: CPU queue Number 0-7.
+ *
+ * Use physical/logical port ranges (0-2)
+ */
+ range = port->portno / BITS_PER_TYPE(u32);
+ /* Port bit set to match-any */
+ port_mask = ~BIT(port->portno % BITS_PER_TYPE(u32));
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_MASK_RNG, range, 0xf);
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_MASK, 0, port_mask);
+}
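A worked example of the computation above, not part of the patch: for a hypothetical egress port 40, range becomes 1 (the 32-63 window) and the mask clears only the port's own bit, so with a match value of 0 every other port bit in the window must be zero while the port's own bit is ignored ("match-any", per the comment above).

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        const unsigned int bits_per_u32 = 32;
        int portno = 40;  /* hypothetical egress port */

        uint32_t range = portno / bits_per_u32;            /* 1: ports 32-63 window */
        uint32_t port_mask = ~BIT(portno % bits_per_u32);  /* every care bit but bit 8 */

        printf("range=%u port_mask=0x%08x\n", range, port_mask);
        /* prints: range=1 port_mask=0xfffffeff */
        return 0;
    }
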
+
/* Convert IS0 chain id to vcap lookup id */
static int sparx5_vcap_is0_cid_to_lookup(int cid)
{
@@ -264,6 +353,17 @@ static int sparx5_vcap_is2_cid_to_lookup(int cid)
return lookup;
}
+/* Convert ES2 chain id to vcap lookup id */
+static int sparx5_vcap_es2_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= SPARX5_VCAP_CID_ES2_L1)
+ lookup = 1;
+
+ return lookup;
+}
+
/* Add ethernet type IS0 keyset to a list */
static void
sparx5_vcap_is0_get_port_etype_keysets(struct vcap_keyset_list *keysetlist,
@@ -435,6 +535,97 @@ static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
return 0;
}
+/* Return the keysets for the vcap port IP4 traffic class configuration */
+static void
+sparx5_vcap_es2_get_port_ipv4_keysets(struct vcap_keyset_list *keysetlist,
+ u32 value)
+{
+ switch (EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV4_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_IPV4_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_VID:
+ /* Not used */
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ }
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_es2_get_port_keysets(struct net_device *ndev,
+ int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
+ switch (EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_ARP_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_ARP_ARP:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP)
+ sparx5_vcap_es2_get_port_ipv4_keysets(keysetlist, value);
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV6_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_VID:
+ /* Not used */
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_ES2_PS_IPV6_IP4_DOWNGRADE:
+ sparx5_vcap_es2_get_port_ipv4_keysets(keysetlist,
+ value);
+ break;
+ }
+ }
+
+ if (l3_proto != ETH_P_ARP && l3_proto != ETH_P_IP &&
+ l3_proto != ETH_P_IPV6) {
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ }
+ return 0;
+}
+
/* Get the port keyset for the vcap lookup */
int sparx5_vcap_get_port_keyset(struct net_device *ndev,
struct vcap_admin *admin,
@@ -456,6 +647,11 @@ int sparx5_vcap_get_port_keyset(struct net_device *ndev,
err = sparx5_vcap_is2_get_port_keysets(ndev, lookup, kslist,
l3_proto);
break;
+ case VCAP_TYPE_ES2:
+ lookup = sparx5_vcap_es2_cid_to_lookup(cid);
+ err = sparx5_vcap_es2_get_port_keysets(ndev, lookup, kslist,
+ l3_proto);
+ break;
default:
port = netdev_priv(ndev);
sparx5_vcap_type_err(port->sparx5, admin, __func__);
@@ -479,6 +675,10 @@ bool sparx5_vcap_is_known_etype(struct vcap_admin *admin, u16 etype)
known_etypes = sparx5_vcap_is2_known_etypes;
size = ARRAY_SIZE(sparx5_vcap_is2_known_etypes);
break;
+ case VCAP_TYPE_ES2:
+ known_etypes = sparx5_vcap_es2_known_etypes;
+ size = ARRAY_SIZE(sparx5_vcap_es2_known_etypes);
+ break;
default:
return false;
}
@@ -519,6 +719,11 @@ sparx5_vcap_validate_keyset(struct net_device *ndev,
sparx5_vcap_is2_get_port_keysets(ndev, lookup, &keysetlist,
l3_proto);
break;
+ case VCAP_TYPE_ES2:
+ lookup = sparx5_vcap_es2_cid_to_lookup(rule->vcap_chain_id);
+ sparx5_vcap_es2_get_port_keysets(ndev, lookup, &keysetlist,
+ l3_proto);
+ break;
default:
port = netdev_priv(ndev);
sparx5_vcap_type_err(port->sparx5, admin, __func__);
@@ -538,43 +743,82 @@ sparx5_vcap_validate_keyset(struct net_device *ndev,
return -ENOENT;
}
-/* API callback used for adding default fields to a rule */
-static void sparx5_vcap_add_default_fields(struct net_device *ndev,
- struct vcap_admin *admin,
- struct vcap_rule *rule)
+static void sparx5_vcap_ingress_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
{
const struct vcap_field *field;
- struct sparx5_port *port;
- bool is_first = true;
+ bool is_first;
+ /* Add ingress port mask matching the net device */
field = vcap_lookup_keyfield(rule, VCAP_KF_IF_IGR_PORT_MASK);
if (field && field->width == SPX5_PORTS)
sparx5_vcap_add_wide_port_mask(rule, ndev);
else if (field && field->width == BITS_PER_TYPE(u32))
- sparx5_vcap_add_range_port_mask(rule, ndev);
+ sparx5_vcap_add_ingress_range_port_mask(rule, ndev);
else
pr_err("%s:%d: %s: could not add an ingress port mask for: %s\n",
__func__, __LINE__, netdev_name(ndev),
sparx5_vcap_keyset_name(ndev, rule->keyset));
+ if (admin->vtype == VCAP_TYPE_IS0)
+ is_first = sparx5_vcap_is0_is_first_chain(rule);
+ else
+ is_first = sparx5_vcap_is2_is_first_chain(rule);
+
+ /* Add key that selects the first/second lookup */
+ if (is_first)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+static void sparx5_vcap_es2_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ const struct vcap_field *field;
+ bool is_first;
+
+ /* Add egress port mask matching the net device */
+ field = vcap_lookup_keyfield(rule, VCAP_KF_IF_EGR_PORT_MASK);
+ if (field)
+ sparx5_vcap_add_egress_range_port_mask(rule, ndev);
+
+ /* Add key that selects the first/second lookup */
+ is_first = sparx5_vcap_es2_is_first_chain(rule);
+
+ if (is_first)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+/* API callback used for adding default fields to a rule */
+static void sparx5_vcap_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct sparx5_port *port;
+
/* add the lookup bit */
switch (admin->vtype) {
case VCAP_TYPE_IS0:
- is_first = sparx5_vcap_is0_is_first_chain(rule);
- break;
case VCAP_TYPE_IS2:
- is_first = sparx5_vcap_is2_is_first_chain(rule);
+ sparx5_vcap_ingress_add_default_fields(ndev, admin, rule);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_add_default_fields(ndev, admin, rule);
break;
default:
port = netdev_priv(ndev);
sparx5_vcap_type_err(port->sparx5, admin, __func__);
break;
}
-
- if (is_first)
- vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
- else
- vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0);
}
/* API callback used for erasing the vcap cache area (not the register area) */
@@ -586,21 +830,60 @@ static void sparx5_vcap_cache_erase(struct vcap_admin *admin)
memset(&admin->cache.counter, 0, sizeof(admin->cache.counter));
}
-/* API callback used for writing to the VCAP cache */
-static void sparx5_vcap_cache_write(struct net_device *ndev,
- struct vcap_admin *admin,
- enum vcap_selection sel,
- u32 start,
- u32 count)
+static void sparx5_vcap_is0_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+
+ if (sel & VCAP_SEL_COUNTER)
+ spx5_wr(admin->cache.counter, sparx5,
+ VCAP_SUPER_VCAP_CNT_DAT(0));
+}
+
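The "match-off" comment above reflects the two mask conventions in play: the cache apparently stores mask bits as 1 = compare-this-bit (the read paths further down recover it by inverting MASK_DAT), while the hardware register takes the inverse. Clearing the don't-care value bits with key & mask before the write keeps a stale value bit from disqualifying the entry. A standalone illustration, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t key  = 0xdeadbeef;  /* hypothetical cached key word */
        uint32_t mask = 0x0000ffff;  /* cache view: 1 = compare this bit */

        uint32_t entry_word = key & mask;  /* what goes to ..._ENTRY_DAT */
        uint32_t mask_word  = ~mask;       /* what goes to ..._MASK_DAT */

        /* no stale value bit survives in a don't-care position */
        assert((entry_word & ~mask) == 0);
        assert(entry_word == 0x0000beef);
        assert(mask_word  == 0xffff0000);

        /* the read path recovers the cache view: ~MASK_DAT == mask */
        assert((uint32_t)~mask_word == mask);
        return 0;
    }
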
+static void sparx5_vcap_is2_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
{
- struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5 = port->sparx5;
u32 *keystr, *mskstr, *actstr;
int idx;
keystr = &admin->cache.keystream[start];
mskstr = &admin->cache.maskstream[start];
actstr = &admin->cache.actionstream[start];
+
switch (sel) {
case VCAP_SEL_ENTRY:
for (idx = 0; idx < count; ++idx) {
@@ -624,44 +907,99 @@ static void sparx5_vcap_cache_write(struct net_device *ndev,
break;
}
if (sel & VCAP_SEL_COUNTER) {
- switch (admin->vtype) {
- case VCAP_TYPE_IS0:
+ start = start & 0xfff; /* counter limit */
+ if (admin->vinst == 0)
spx5_wr(admin->cache.counter, sparx5,
- VCAP_SUPER_VCAP_CNT_DAT(0));
- break;
- case VCAP_TYPE_IS2:
- start = start & 0xfff; /* counter limit */
- if (admin->vinst == 0)
- spx5_wr(admin->cache.counter, sparx5,
- ANA_ACL_CNT_A(start));
- else
- spx5_wr(admin->cache.counter, sparx5,
- ANA_ACL_CNT_B(start));
- spx5_wr(admin->cache.sticky, sparx5,
- VCAP_SUPER_VCAP_CNT_DAT(0));
- break;
- default:
- sparx5_vcap_type_err(sparx5, admin, __func__);
- break;
+ ANA_ACL_CNT_A(start));
+ else
+ spx5_wr(admin->cache.counter, sparx5,
+ ANA_ACL_CNT_B(start));
+ spx5_wr(admin->cache.sticky, sparx5,
+ VCAP_SUPER_VCAP_CNT_DAT(0));
+ }
+}
+
+static void sparx5_vcap_es2_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_ES2_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_ES2_VCAP_MASK_DAT(idx));
}
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_ES2_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0x7ff; /* counter limit */
+ spx5_wr(admin->cache.counter, sparx5, EACL_ES2_CNT(start));
+ spx5_wr(admin->cache.sticky, sparx5, VCAP_ES2_VCAP_CNT_DAT(0));
}
}
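The & 0x7ff above suggests ES2 indexes a 2048-entry counter pool (the IS2 paths use & 0xfff, i.e. 4096 entries). A quick check of the wrap-around difference, not part of the patch:

    #include <assert.h>

    int main(void)
    {
        unsigned int start = 3000;  /* hypothetical rule address */

        assert((start & 0x7ff) == 952);   /* ES2: 3000 mod 2048 */
        assert((start & 0xfff) == 3000);  /* IS2: 3000 mod 4096 */
        return 0;
    }
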
-/* API callback used for reading from the VCAP into the VCAP cache */
-static void sparx5_vcap_cache_read(struct net_device *ndev,
- struct vcap_admin *admin,
- enum vcap_selection sel,
- u32 start,
- u32 count)
+/* API callback used for writing to the VCAP cache */
+static void sparx5_vcap_cache_write(struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
{
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5 *sparx5 = port->sparx5;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_cache_write(sparx5, admin, sel, start, count);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+static void sparx5_vcap_is0_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
u32 *keystr, *mskstr, *actstr;
int idx;
keystr = &admin->cache.keystream[start];
mskstr = &admin->cache.maskstream[start];
actstr = &admin->cache.actionstream[start];
+
if (sel & VCAP_SEL_ENTRY) {
for (idx = 0; idx < count; ++idx) {
keystr[idx] = spx5_rd(sparx5,
@@ -670,35 +1008,120 @@ static void sparx5_vcap_cache_read(struct net_device *ndev,
VCAP_SUPER_VCAP_MASK_DAT(idx));
}
}
- if (sel & VCAP_SEL_ACTION) {
+
+ if (sel & VCAP_SEL_ACTION)
for (idx = 0; idx < count; ++idx)
actstr[idx] = spx5_rd(sparx5,
VCAP_SUPER_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ admin->cache.counter =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ }
+}
+
+static void sparx5_vcap_is2_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] = ~spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
}
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+
if (sel & VCAP_SEL_COUNTER) {
- switch (admin->vtype) {
- case VCAP_TYPE_IS0:
+ start = start & 0xfff; /* counter limit */
+ if (admin->vinst == 0)
admin->cache.counter =
- spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
- admin->cache.sticky =
- spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
- break;
- case VCAP_TYPE_IS2:
- start = start & 0xfff; /* counter limit */
- if (admin->vinst == 0)
- admin->cache.counter =
- spx5_rd(sparx5, ANA_ACL_CNT_A(start));
- else
- admin->cache.counter =
- spx5_rd(sparx5, ANA_ACL_CNT_B(start));
- admin->cache.sticky =
- spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
- break;
- default:
- sparx5_vcap_type_err(sparx5, admin, __func__);
- break;
+ spx5_rd(sparx5, ANA_ACL_CNT_A(start));
+ else
+ admin->cache.counter =
+ spx5_rd(sparx5, ANA_ACL_CNT_B(start));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ }
+}
+
+static void sparx5_vcap_es2_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] =
+ ~spx5_rd(sparx5, VCAP_ES2_VCAP_MASK_DAT(idx));
}
}
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0x7ff; /* counter limit */
+ admin->cache.counter =
+ spx5_rd(sparx5, EACL_ES2_CNT(start));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_CNT_DAT(0));
+ }
+}
+
+/* API callback used for reading from the VCAP into the VCAP cache */
+static void sparx5_vcap_cache_read(struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_cache_read(sparx5, admin, sel, start, count);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
/* API callback used for initializing a VCAP address range */
@@ -712,16 +1135,12 @@ static void sparx5_vcap_range_init(struct net_device *ndev,
_sparx5_vcap_range_init(sparx5, admin, addr, count);
}
-/* API callback used for updating the VCAP cache, IS0 and IS2 for now */
-static void sparx5_vcap_update(struct net_device *ndev,
- struct vcap_admin *admin, enum vcap_command cmd,
- enum vcap_selection sel, u32 addr)
+static void sparx5_vcap_super_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
{
- struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5 = port->sparx5;
- bool clear;
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
- clear = (cmd == VCAP_CMD_INITIALIZE);
spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
VCAP_SUPER_CFG_MV_SIZE_SET(0), sparx5, VCAP_SUPER_CFG);
spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
@@ -735,6 +1154,87 @@ static void sparx5_vcap_update(struct net_device *ndev,
sparx5_vcap_wait_super_update(sparx5);
}
+static void sparx5_vcap_es2_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
+
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES2_CFG_MV_SIZE_SET(0), sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+}
+
+/* API callback used for updating the VCAP cache */
+static void sparx5_vcap_update(struct net_device *ndev,
+ struct vcap_admin *admin, enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_super_update(sparx5, cmd, sel, addr);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_update(sparx5, cmd, sel, addr);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+static void sparx5_vcap_super_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
+ spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_SUPER_CFG_MV_SIZE_SET(mv_size),
+ sparx5, VCAP_SUPER_CFG);
+ spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_SUPER_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_SUPER_CTRL);
+ sparx5_vcap_wait_super_update(sparx5);
+}
+
+static void sparx5_vcap_es2_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_ES2_CFG_MV_SIZE_SET(mv_size),
+ sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+}
+
/* API callback used for moving a block of rules in the VCAP */
static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
u32 addr, int offset, int count)
@@ -753,18 +1253,19 @@ static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
mv_num_pos = -offset - 1;
cmd = VCAP_CMD_MOVE_UP;
}
- spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(mv_num_pos) |
- VCAP_SUPER_CFG_MV_SIZE_SET(mv_size),
- sparx5, VCAP_SUPER_CFG);
- spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
- VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
- VCAP_SUPER_CTRL_CLEAR_CACHE_SET(false) |
- VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
- sparx5, VCAP_SUPER_CTRL);
- sparx5_vcap_wait_super_update(sparx5);
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_super_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
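The move-up branch visible above encodes the move distance as value minus one (mv_num_pos = -offset - 1). mv_size is presumably count - 1, mirroring the size - 1 encoding in _sparx5_vcap_range_init(), though that assignment falls outside this hunk. A standalone sketch under that assumption, not part of the patch:

    #include <stdio.h>

    int main(void)
    {
        int offset = -1;  /* move the block one address up */
        int count = 4;    /* rules in the block being moved */

        int mv_num_pos = -offset - 1;  /* 0: move distance minus one */
        int mv_size = count - 1;       /* assumed: block size minus one */

        printf("MOVE_UP num_pos=%d size=%d\n", mv_num_pos, mv_size);
        return 0;
    }
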
static struct vcap_operations sparx5_vcap_ops = {
@@ -832,6 +1333,22 @@ static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
ANA_ACL_VCAP_S2_CFG(portno));
}
+/* Enable ES2 lookups per port and set the keyset generation */
+static void sparx5_vcap_es2_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+ u32 keysel;
+
+ keysel = VCAP_ES2_KEYSEL(true, VCAP_ES2_PS_ARP_MAC_ETYPE,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE);
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_wr(keysel, sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+}
+
/* Enable lookups per port and set the keyset generation */
static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
@@ -843,6 +1360,9 @@ static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
case VCAP_TYPE_IS2:
sparx5_vcap_is2_port_key_selection(sparx5, admin);
break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_port_key_selection(sparx5, admin);
+ break;
default:
sparx5_vcap_type_err(sparx5, admin, __func__);
break;
@@ -871,6 +1391,14 @@ static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
sparx5,
ANA_ACL_VCAP_S2_CFG(portno));
break;
+ case VCAP_TYPE_ES2:
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(0),
+ EACL_VCAP_ES2_KEY_SEL_KEY_ENA,
+ sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+ break;
default:
sparx5_vcap_type_err(sparx5, admin, __func__);
break;
@@ -904,6 +1432,7 @@ sparx5_vcap_admin_alloc(struct sparx5 *sparx5, struct vcap_control *ctrl,
mutex_init(&admin->lock);
admin->vtype = cfg->vtype;
admin->vinst = cfg->vinst;
+ admin->ingress = cfg->ingress;
admin->lookups = cfg->lookups;
admin->lookups_per_instance = cfg->lookups_per_instance;
admin->first_cid = cfg->first_cid;
@@ -927,22 +1456,43 @@ static void sparx5_vcap_block_alloc(struct sparx5 *sparx5,
struct vcap_admin *admin,
const struct sparx5_vcap_inst *cfg)
{
- int idx;
+ int idx, cores;
- /* Super VCAP block mapping and address configuration. Block 0
- * is assigned addresses 0 through 3071, block 1 is assigned
- * addresses 3072 though 6143, and so on.
- */
- for (idx = cfg->blockno; idx < cfg->blockno + cfg->blocks; ++idx) {
- spx5_wr(VCAP_SUPER_IDX_CORE_IDX_SET(idx), sparx5,
- VCAP_SUPER_IDX);
- spx5_wr(VCAP_SUPER_MAP_CORE_MAP_SET(cfg->map_id), sparx5,
- VCAP_SUPER_MAP);
- }
- admin->first_valid_addr = cfg->blockno * SUPER_VCAP_BLK_SIZE;
- admin->last_used_addr = admin->first_valid_addr +
- cfg->blocks * SUPER_VCAP_BLK_SIZE;
- admin->last_valid_addr = admin->last_used_addr - 1;
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ /* Super VCAP block mapping and address configuration. Block 0
+ * is assigned addresses 0 through 3071, block 1 is assigned
+ * addresses 3072 through 6143, and so on.
+ */
+ for (idx = cfg->blockno; idx < cfg->blockno + cfg->blocks;
+ ++idx) {
+ spx5_wr(VCAP_SUPER_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_SUPER_IDX);
+ spx5_wr(VCAP_SUPER_MAP_CORE_MAP_SET(cfg->map_id),
+ sparx5, VCAP_SUPER_MAP);
+ }
+ admin->first_valid_addr = cfg->blockno * SUPER_VCAP_BLK_SIZE;
+ admin->last_used_addr = admin->first_valid_addr +
+ cfg->blocks * SUPER_VCAP_BLK_SIZE;
+ admin->last_valid_addr = admin->last_used_addr - 1;
+ break;
+ case VCAP_TYPE_ES2:
+ admin->first_valid_addr = 0;
+ admin->last_used_addr = cfg->count;
+ admin->last_valid_addr = cfg->count - 1;
+ cores = spx5_rd(sparx5, VCAP_ES2_CORE_CNT);
+ for (idx = 0; idx < cores; ++idx) {
+ spx5_wr(VCAP_ES2_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_ES2_IDX);
+ spx5_wr(VCAP_ES2_MAP_CORE_MAP_SET(1), sparx5,
+ VCAP_ES2_MAP);
+ }
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
/* Allocate a vcap control and vcap instances and configure the system */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
index aabdf4355103..46a08d5aff3d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
@@ -32,6 +32,11 @@
#define SPARX5_VCAP_CID_IS2_MAX \
(VCAP_CID_INGRESS_STAGE2_L3 + VCAP_CID_LOOKUP_SIZE - 1) /* IS2 Max */
+#define SPARX5_VCAP_CID_ES2_L0 VCAP_CID_EGRESS_STAGE2_L0 /* ES2 lookup 0 */
+#define SPARX5_VCAP_CID_ES2_L1 VCAP_CID_EGRESS_STAGE2_L1 /* ES2 lookup 1 */
+#define SPARX5_VCAP_CID_ES2_MAX \
+ (VCAP_CID_EGRESS_STAGE2_L1 + VCAP_CID_LOOKUP_SIZE - 1) /* ES2 Max */
+
/* IS0 port keyset selection control */
/* IS0 ethernet, IPv4, IPv6 traffic type keyset generation */
@@ -129,6 +134,35 @@ enum vcap_is2_port_sel_arp {
VCAP_IS2_PS_ARP_ARP,
};
+/* ES2 port keyset selection control */
+
+/* ES2 IPv4 traffic type keyset generation */
+enum vcap_es2_port_sel_ipv4 {
+ VCAP_ES2_PS_IPV4_MAC_ETYPE,
+ VCAP_ES2_PS_IPV4_IP_7TUPLE,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
+ VCAP_ES2_PS_IPV4_IP4_VID,
+ VCAP_ES2_PS_IPV4_IP4_OTHER,
+};
+
+/* ES2 IPv6 traffic type keyset generation */
+enum vcap_es2_port_sel_ipv6 {
+ VCAP_ES2_PS_IPV6_MAC_ETYPE,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE_VID,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE_STD,
+ VCAP_ES2_PS_IPV6_IP6_VID,
+ VCAP_ES2_PS_IPV6_IP6_STD,
+ VCAP_ES2_PS_IPV6_IP4_DOWNGRADE,
+};
+
+/* ES2 ARP traffic type keyset generation */
+enum vcap_es2_port_sel_arp {
+ VCAP_ES2_PS_ARP_MAC_ETYPE,
+ VCAP_ES2_PS_ARP_ARP,
+};
+
/* Get the port keyset for the vcap lookup */
int sparx5_vcap_get_port_keyset(struct net_device *ndev,
struct vcap_admin *admin,
diff --git a/drivers/net/ethernet/microchip/vcap/Makefile b/drivers/net/ethernet/microchip/vcap/Makefile
index 0adb8f5a8735..c86f20e6491f 100644
--- a/drivers/net/ethernet/microchip/vcap/Makefile
+++ b/drivers/net/ethernet/microchip/vcap/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_VCAP) += vcap.o
obj-$(CONFIG_VCAP_KUNIT_TEST) += vcap_model_kunit.o
vcap-$(CONFIG_DEBUG_FS) += vcap_api_debugfs.o
-vcap-y += vcap_api.o
+vcap-y += vcap_api.o vcap_tc.o
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
index 962383f20f1b..9c6766c4b75d 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
@@ -1,10 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
* Microchip VCAP API
*/
-/* This file is autogenerated by cml-utils 2022-12-06 09:49:28 +0100.
- * Commit ID: cd9451f1b9d8cafa58f845de66a6e373658019ef
+/* This file is autogenerated by cml-utils 2023-01-17 16:52:16 +0100.
+ * Commit ID: 229ec79be5df142c1f335a01d0e63232d4feb2ba
*/
#ifndef __VCAP_AG_API__
@@ -276,7 +276,8 @@ enum vcap_keyfield_set {
* Select the mode of the Generic Index
* VCAP_KF_LOOKUP_PAG: W8, sparx5: is2, lan966x: is2
* Classified Policy Association Group: chains rules from IS1/CLM to IS2
- * VCAP_KF_MIRROR_ENA: *** No docstring ***
+ * VCAP_KF_MIRROR_PROBE: W2, sparx5: es2
+ * Identifies frame copies generated as a result of mirroring
* VCAP_KF_OAM_CCM_CNTS_EQ0: W1, sparx5: is2/es2, lan966x: is2
* Dual-ended loss measurement counters in CCM frames are all zero
* VCAP_KF_OAM_DETECTED: W1, lan966x: is2
@@ -407,7 +408,7 @@ enum vcap_key_field {
VCAP_KF_LOOKUP_GEN_IDX,
VCAP_KF_LOOKUP_GEN_IDX_SEL,
VCAP_KF_LOOKUP_PAG,
- VCAP_KF_MIRROR_ENA,
+ VCAP_KF_MIRROR_PROBE,
VCAP_KF_OAM_CCM_CNTS_EQ0,
VCAP_KF_OAM_DETECTED,
VCAP_KF_OAM_FLAGS,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index 83223c4770f2..6307d59f23da 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -1601,15 +1601,17 @@ struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid)
}
EXPORT_SYMBOL_GPL(vcap_find_admin);
-/* Is this the last admin instance ordered by chain id */
+/* Is this the last admin instance ordered by chain id and direction */
static bool vcap_admin_is_last(struct vcap_control *vctrl,
- struct vcap_admin *admin)
+ struct vcap_admin *admin,
+ bool ingress)
{
struct vcap_admin *iter, *last = NULL;
int max_cid = 0;
list_for_each_entry(iter, &vctrl->list, list) {
- if (iter->first_cid > max_cid) {
+ if (iter->first_cid > max_cid &&
+ iter->ingress == ingress) {
last = iter;
max_cid = iter->first_cid;
}
@@ -2753,7 +2755,7 @@ int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
EXPORT_SYMBOL_GPL(vcap_rule_get_key_u32);
/* Find a client action field in a rule */
-static struct vcap_client_actionfield *
+struct vcap_client_actionfield *
vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act)
{
struct vcap_rule_internal *ri = (struct vcap_rule_internal *)rule;
@@ -2764,6 +2766,7 @@ vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act)
return caf;
return NULL;
}
+EXPORT_SYMBOL_GPL(vcap_find_actionfield);
/* Check if the actionfield is already in the rule */
static bool vcap_actionfield_unique(struct vcap_rule *rule,
@@ -3177,7 +3180,7 @@ int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
EXPORT_SYMBOL_GPL(vcap_enable_lookups);
/* Is this chain id the last lookup of all VCAPs */
-bool vcap_is_last_chain(struct vcap_control *vctrl, int cid)
+bool vcap_is_last_chain(struct vcap_control *vctrl, int cid, bool ingress)
{
struct vcap_admin *admin;
int lookup;
@@ -3189,7 +3192,7 @@ bool vcap_is_last_chain(struct vcap_control *vctrl, int cid)
if (!admin)
return false;
- if (!vcap_admin_is_last(vctrl, admin))
+ if (!vcap_admin_is_last(vctrl, admin, ingress))
return false;
/* This must be the last lookup in this VCAP type */
@@ -3264,6 +3267,28 @@ static int vcap_rule_get_key(struct vcap_rule *rule,
return 0;
}
+/* Find a keyset having the same size as the provided rule, where the keyset
+ * does not have a type id.
+ */
+static int vcap_rule_get_untyped_keyset(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_set *keyfield_set;
+ int idx;
+
+ keyfield_set = vctrl->vcaps[vt].keyfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].keyfield_set_size; ++idx) {
+ if (keyfield_set[idx].sw_per_item == ri->keyset_sw &&
+ keyfield_set[idx].type_id == (u8)-1) {
+ vcap_keyset_list_add(matches, idx);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
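The fallback above exists for keysets like ES2's IP_7TUPLE, whose .type_id is -1 in the model tables (255 after the (u8)-1 cast) because an X12 entry fills the whole row and carries no type field. A standalone mock of the lookup, not part of the patch:

    #include <stdio.h>

    struct mock_set { int type_id; int sw_per_item; };

    /* shaped like the ES2 keyfield_set table earlier in this patch */
    static const struct mock_set es2_sets[] = {
        { .type_id = 0,  .sw_per_item = 6  },  /* MAC_ETYPE */
        { .type_id = -1, .sw_per_item = 12 },  /* IP_7TUPLE: untyped */
        { .type_id = 4,  .sw_per_item = 6  },  /* IP6_STD */
    };

    int main(void)
    {
        int keyset_sw = 12;  /* rule width in subwords, no VCAP_KF_TYPE key */

        for (unsigned int i = 0; i < sizeof(es2_sets) / sizeof(es2_sets[0]); i++)
            if (es2_sets[i].sw_per_item == keyset_sw &&
                es2_sets[i].type_id == -1)
                printf("untyped keyset at index %u\n", i);  /* index 1 */
        return 0;
    }
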
/* Get the keysets that match the rule key type/mask */
int vcap_rule_get_keysets(struct vcap_rule_internal *ri,
struct vcap_keyset_list *matches)
@@ -3277,7 +3302,7 @@ int vcap_rule_get_keysets(struct vcap_rule_internal *ri,
err = vcap_rule_get_key(&ri->data, VCAP_KF_TYPE, &kf);
if (err)
- return err;
+ return vcap_rule_get_untyped_keyset(ri, matches);
if (kf.ctrl.type == VCAP_FIELD_BIT) {
value = kf.data.u1.value;
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h
index 40491116b0a9..62db270f65af 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h
@@ -176,6 +176,7 @@ struct vcap_admin {
int first_valid_addr; /* bottom of address range to be used */
int last_used_addr; /* address of lowest added rule */
bool w32be; /* vcap uses "32bit-word big-endian" encoding */
+ bool ingress; /* chain traffic direction */
struct vcap_cache_data cache; /* encoded rule data */
};
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
index 69ea230ba8a1..417af9754bcc 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -222,7 +222,7 @@ int vcap_chain_offset(struct vcap_control *vctrl, int from_cid, int to_cid);
/* Is the next chain id in the following lookup, possibly in another VCAP */
bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid);
/* Is this chain id the last lookup of all VCAPs */
-bool vcap_is_last_chain(struct vcap_control *vctrl, int cid);
+bool vcap_is_last_chain(struct vcap_control *vctrl, int cid, bool ingress);
/* Provide all rules via a callback interface */
int vcap_rule_iter(struct vcap_control *vctrl,
int (*callback)(void *, struct vcap_rule *), void *arg);
@@ -268,4 +268,7 @@ int vcap_rule_mod_action_u32(struct vcap_rule *rule,
/* Get a 32 bit key field value and mask from the rule */
int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
u32 *value, u32 *mask);
+
+struct vcap_client_actionfield *
+vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act);
#endif /* __VCAP_API_CLIENT__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
index d49b1cf7712f..c2c3397c5898 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
@@ -44,11 +44,14 @@ static void vcap_debugfs_show_rule_keyfield(struct vcap_control *vctrl,
out->prf(out->dst, "%pI4h/%pI4h", &data->u32.value,
&data->u32.mask);
} else if (key == VCAP_KF_ETYPE ||
- key == VCAP_KF_IF_IGR_PORT_MASK) {
+ key == VCAP_KF_IF_IGR_PORT_MASK ||
+ key == VCAP_KF_IF_EGR_PORT_MASK) {
hex = true;
} else {
u32 fmsk = (1 << keyfield[key].width) - 1;
+ if (keyfield[key].width == 32)
+ fmsk = ~0;
out->prf(out->dst, "%u/%u", data->u32.value & fmsk,
data->u32.mask & fmsk);
}
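The width == 32 special case added above avoids a genuine pitfall: (1 << 32) - 1 is undefined behaviour in C (shift count equal to the operand width), so the generic formula cannot produce a full 32-bit mask. A standalone safe variant, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t field_mask(unsigned int width)
    {
        if (width == 32)
            return ~0u;            /* full-width field: all-ones mask */
        return (1u << width) - 1;  /* e.g. width 7 -> 0x0000007f */
    }

    int main(void)
    {
        printf("0x%08x 0x%08x\n", field_mask(7), field_mask(32));
        /* prints: 0x0000007f 0xffffffff */
        return 0;
    }
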
@@ -277,6 +280,7 @@ static void vcap_show_admin_info(struct vcap_control *vctrl,
out->prf(out->dst, "version: %d\n", vcap->version);
out->prf(out->dst, "vtype: %d\n", admin->vtype);
out->prf(out->dst, "vinst: %d\n", admin->vinst);
+ out->prf(out->dst, "ingress: %d\n", admin->ingress);
out->prf(out->dst, "first_cid: %d\n", admin->first_cid);
out->prf(out->dst, "last_cid: %d\n", admin->last_cid);
out->prf(out->dst, "lookups: %d\n", admin->lookups);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
index cbf7e0f110b8..b9c1c9d5eee8 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
@@ -389,6 +389,7 @@ static const char * const test_admin_info_expect[] = {
"version: 1\n",
"vtype: 2\n",
"vinst: 0\n",
+ "ingress: 1\n",
"first_cid: 10000\n",
"last_cid: 19999\n",
"lookups: 4\n",
@@ -407,6 +408,7 @@ static void vcap_api_show_admin_test(struct kunit *test)
.last_valid_addr = 3071,
.first_valid_addr = 0,
.last_used_addr = 794,
+ .ingress = true,
};
struct vcap_output_print out = {
.prf = (void *)test_prf,
@@ -435,6 +437,7 @@ static const char * const test_admin_expect[] = {
"version: 1\n",
"vtype: 2\n",
"vinst: 0\n",
+ "ingress: 1\n",
"first_cid: 8000000\n",
"last_cid: 8199999\n",
"lookups: 4\n",
@@ -496,6 +499,7 @@ static void vcap_api_show_admin_rule_test(struct kunit *test)
.last_valid_addr = 3071,
.first_valid_addr = 0,
.last_used_addr = 794,
+ .ingress = true,
.cache = {
.keystream = keydata,
.maskstream = mskdata,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index a31cd08e3752..b2753aac8ad2 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -2145,6 +2145,71 @@ static void vcap_api_filter_keylist_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, 26, idx);
}
+static void vcap_api_rule_chain_path_test(struct kunit *test)
+{
+ struct vcap_admin admin1 = {
+ .vtype = VCAP_TYPE_IS0,
+ .vinst = 0,
+ .first_cid = 1000000,
+ .last_cid = 1199999,
+ .lookups = 6,
+ .lookups_per_instance = 2,
+ };
+ struct vcap_enabled_port eport3 = {
+ .ndev = &test_netdev,
+ .cookie = 0x100,
+ .src_cid = 0,
+ .dst_cid = 1000000,
+ };
+ struct vcap_enabled_port eport2 = {
+ .ndev = &test_netdev,
+ .cookie = 0x200,
+ .src_cid = 1000000,
+ .dst_cid = 1100000,
+ };
+ struct vcap_enabled_port eport1 = {
+ .ndev = &test_netdev,
+ .cookie = 0x300,
+ .src_cid = 1100000,
+ .dst_cid = 8000000,
+ };
+ bool ret;
+ int chain;
+
+ vcap_test_api_init(&admin1);
+ list_add_tail(&eport1.list, &admin1.enabled);
+ list_add_tail(&eport2.list, &admin1.enabled);
+ list_add_tail(&eport3.list, &admin1.enabled);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1000000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1100000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1200000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 0);
+ KUNIT_EXPECT_EQ(test, 1000000, chain);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 1000000);
+ KUNIT_EXPECT_EQ(test, 1100000, chain);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 1100000);
+ KUNIT_EXPECT_EQ(test, 8000000, chain);
+}
+
+static struct kunit_case vcap_api_rule_enable_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_chain_path_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_rule_enable_test_suite = {
+ .name = "VCAP_API_Rule_Enable_Testsuite",
+ .test_cases = vcap_api_rule_enable_test_cases,
+};
+
static struct kunit_suite vcap_api_rule_remove_test_suite = {
.name = "VCAP_API_Rule_Remove_Testsuite",
.test_cases = vcap_api_rule_remove_test_cases,
@@ -2235,6 +2300,7 @@ static struct kunit_suite vcap_api_encoding_test_suite = {
.test_cases = vcap_api_encoding_test_cases,
};
+kunit_test_suite(vcap_api_rule_enable_test_suite);
kunit_test_suite(vcap_api_rule_remove_test_suite);
kunit_test_suite(vcap_api_rule_insert_test_suite);
kunit_test_suite(vcap_api_rule_counter_test_suite);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
index 85a8d8566aa2..6d5d73d00562 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
@@ -1709,7 +1709,7 @@ static const struct vcap_field es2_mac_etype_keyfield[] = {
.offset = 96,
.width = 1,
},
- [VCAP_KF_MIRROR_ENA] = {
+ [VCAP_KF_MIRROR_PROBE] = {
.type = VCAP_FIELD_U32,
.offset = 97,
.width = 2,
@@ -1847,7 +1847,7 @@ static const struct vcap_field es2_arp_keyfield[] = {
.offset = 95,
.width = 1,
},
- [VCAP_KF_MIRROR_ENA] = {
+ [VCAP_KF_MIRROR_PROBE] = {
.type = VCAP_FIELD_U32,
.offset = 96,
.width = 2,
@@ -2010,7 +2010,7 @@ static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
.offset = 96,
.width = 1,
},
- [VCAP_KF_MIRROR_ENA] = {
+ [VCAP_KF_MIRROR_PROBE] = {
.type = VCAP_FIELD_U32,
.offset = 97,
.width = 2,
@@ -2223,7 +2223,7 @@ static const struct vcap_field es2_ip4_other_keyfield[] = {
.offset = 96,
.width = 1,
},
- [VCAP_KF_MIRROR_ENA] = {
+ [VCAP_KF_MIRROR_PROBE] = {
.type = VCAP_FIELD_U32,
.offset = 97,
.width = 2,
@@ -2376,7 +2376,7 @@ static const struct vcap_field es2_ip_7tuple_keyfield[] = {
.offset = 93,
.width = 1,
},
- [VCAP_KF_MIRROR_ENA] = {
+ [VCAP_KF_MIRROR_PROBE] = {
.type = VCAP_FIELD_U32,
.offset = 94,
.width = 2,
@@ -2569,7 +2569,7 @@ static const struct vcap_field es2_ip4_vid_keyfield[] = {
.offset = 48,
.width = 1,
},
- [VCAP_KF_MIRROR_ENA] = {
+ [VCAP_KF_MIRROR_PROBE] = {
.type = VCAP_FIELD_U32,
.offset = 49,
.width = 2,
@@ -3847,7 +3847,7 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
[VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
[VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
- [VCAP_KF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
[VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
[VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
[VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.c b/drivers/net/ethernet/microchip/vcap/vcap_tc.c
new file mode 100644
index 000000000000..09a994a7cec2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP TC
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/flow_offload.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+
+#include "vcap_api_client.h"
+#include "vcap_tc.h"
+
+enum vcap_is2_arp_opcode {
+ VCAP_IS2_ARP_REQUEST,
+ VCAP_IS2_ARP_REPLY,
+ VCAP_IS2_RARP_REQUEST,
+ VCAP_IS2_RARP_REPLY,
+};
+
+enum vcap_arp_opcode {
+ VCAP_ARP_OP_RESERVED,
+ VCAP_ARP_OP_REQUEST,
+ VCAP_ARP_OP_REPLY,
+};
+
+int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
+ enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
+ struct flow_match_eth_addrs match;
+ struct vcap_u48_key smac, dmac;
+ int err = 0;
+
+ flow_rule_match_eth_addrs(st->frule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
+ vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
+ if (err)
+ goto out;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
+ vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ethaddr_usage);
+
+int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IP) {
+ struct flow_match_ipv4_addrs mt;
+
+ flow_rule_match_ipv4_addrs(st->frule, &mt);
+ if (mt.mask->src) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_SIP,
+ be32_to_cpu(mt.key->src),
+ be32_to_cpu(mt.mask->src));
+ if (err)
+ goto out;
+ }
+ if (mt.mask->dst) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_DIP,
+ be32_to_cpu(mt.key->dst),
+ be32_to_cpu(mt.mask->dst));
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv4_usage);
+
+int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IPV6) {
+ struct flow_match_ipv6_addrs mt;
+ struct vcap_u128_key sip;
+ struct vcap_u128_key dip;
+
+ flow_rule_match_ipv6_addrs(st->frule, &mt);
+ /* Check if address masks are non-zero */
+ if (!ipv6_addr_any(&mt.mask->src)) {
+ vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
+ vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_SIP, &sip);
+ if (err)
+ goto out;
+ }
+ if (!ipv6_addr_any(&mt.mask->dst)) {
+ vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
+ vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_DIP, &dip);
+ if (err)
+ goto out;
+ }
+ }
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv6_usage);
+
+int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_ports mt;
+ u16 value, mask;
+ int err = 0;
+
+ flow_rule_match_ports(st->frule, &mt);
+
+ if (mt.mask->src) {
+ value = be16_to_cpu(mt.key->src);
+ mask = be16_to_cpu(mt.mask->src);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->dst) {
+ value = be16_to_cpu(mt.key->dst);
+ mask = be16_to_cpu(mt.mask->dst);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_portnum_usage);
+
+int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0;
+ struct flow_match_vlan mt;
+ u16 tpid;
+ int err;
+
+ flow_rule_match_cvlan(st->frule, &mt);
+
+ tpid = be16_to_cpu(mt.key->vlan_tpid);
+
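+	/* A TPID of ETH_P_8021Q selects the second-tag (VID1/PCP1) key fields */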
+ if (tpid == ETH_P_8021Q) {
+ vid_key = VCAP_KF_8021Q_VID1;
+ pcp_key = VCAP_KF_8021Q_PCP1;
+ }
+
+ if (mt.mask->vlan_id) {
+ err = vcap_rule_add_key_u32(st->vrule, vid_key,
+ mt.key->vlan_id,
+ mt.mask->vlan_id);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_priority) {
+ err = vcap_rule_add_key_u32(st->vrule, pcp_key,
+ mt.key->vlan_priority,
+ mt.mask->vlan_priority);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
+
+ return 0;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_cvlan_usage);
+
+int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
+ enum vcap_key_field vid_key,
+ enum vcap_key_field pcp_key)
+{
+ struct flow_match_vlan mt;
+ int err;
+
+ flow_rule_match_vlan(st->frule, &mt);
+
+ if (mt.mask->vlan_id) {
+ err = vcap_rule_add_key_u32(st->vrule, vid_key,
+ mt.key->vlan_id,
+ mt.mask->vlan_id);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_priority) {
+ err = vcap_rule_add_key_u32(st->vrule, pcp_key,
+ mt.key->vlan_priority,
+ mt.mask->vlan_priority);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
+
+ return 0;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_vlan_usage);
+
+int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_tcp mt;
+ u16 tcp_flags_mask;
+ u16 tcp_flags_key;
+ enum vcap_bit val;
+ int err = 0;
+
+ flow_rule_match_tcp(st->frule, &mt);
+ tcp_flags_key = be16_to_cpu(mt.key->flags);
+ tcp_flags_mask = be16_to_cpu(mt.mask->flags);
+
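+	/* Add a key bit only for each TCP flag present in the mask */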
+ if (tcp_flags_mask & TCPHDR_FIN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_FIN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_SYN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_SYN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_RST) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_RST)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_PSH) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_PSH)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_ACK) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_ACK)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_URG) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_URG)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_tcp_usage);
+
+int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_arp mt;
+ u16 value, mask;
+ u32 ipval, ipmsk;
+ int err;
+
+ flow_rule_match_arp(st->frule, &mt);
+
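+	/* Map the ARP/RARP request/reply operation to the 2-bit IS2 opcode */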
+ if (mt.mask->op) {
+ mask = 0x3;
+ if (st->l3_proto == ETH_P_ARP) {
+ value = mt.key->op == VCAP_ARP_OP_REQUEST ?
+ VCAP_IS2_ARP_REQUEST :
+ VCAP_IS2_ARP_REPLY;
+ } else { /* RARP */
+ value = mt.key->op == VCAP_ARP_OP_REQUEST ?
+ VCAP_IS2_RARP_REQUEST :
+ VCAP_IS2_RARP_REPLY;
+ }
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
+ value, mask);
+ if (err)
+ goto out;
+ }
+
+ /* The IS2 ARP keyset does not support ARP hardware addresses */
+ if (!is_zero_ether_addr(mt.mask->sha) ||
+ !is_zero_ether_addr(mt.mask->tha)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mt.mask->sip) {
+ ipval = be32_to_cpu((__force __be32)mt.key->sip);
+ ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
+ ipval, ipmsk);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->tip) {
+ ipval = be32_to_cpu((__force __be32)mt.key->tip);
+ ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
+ ipval, ipmsk);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);
+
+ return 0;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_arp_usage);
+
+int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_ip mt;
+ int err = 0;
+
+ flow_rule_match_ip(st->frule, &mt);
+
+ if (mt.mask->tos) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
+ mt.key->tos,
+ mt.mask->tos);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ip_usage);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.h b/drivers/net/ethernet/microchip/vcap/vcap_tc.h
new file mode 100644
index 000000000000..5c55ccbee175
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP TC
+ */
+
+#ifndef __VCAP_TC__
+#define __VCAP_TC__
+
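+/* Shared state for the tc flower key-usage parser helpers */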
+struct vcap_tc_flower_parse_usage {
+ struct flow_cls_offload *fco;
+ struct flow_rule *frule;
+ struct vcap_rule *vrule;
+ struct vcap_admin *admin;
+ u16 l3_proto;
+ u8 l4_proto;
+ unsigned int used_keys;
+};
+
+int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
+ enum vcap_key_field vid_key,
+ enum vcap_key_field pcp_key);
+int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st);
+
+#endif /* __VCAP_TC__ */
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index b144f2237748..f9b8f372ec8a 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1217,9 +1217,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
unsigned int max_queues_per_port = num_online_cpus();
struct gdma_context *gc = pci_get_drvdata(pdev);
struct gdma_irq_context *gic;
- unsigned int max_irqs;
- u16 *cpus;
- cpumask_var_t req_mask;
+ unsigned int max_irqs, cpu;
int nvec, irq;
int err, i = 0, j;
@@ -1240,21 +1238,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq_vector;
}
- if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
- err = -ENOMEM;
- goto free_irq;
- }
-
- cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
- if (!cpus) {
- err = -ENOMEM;
- goto free_mask;
- }
- for (i = 0; i < nvec; i++)
- cpus[i] = cpumask_local_spread(i, gc->numa_node);
-
for (i = 0; i < nvec; i++) {
- cpumask_set_cpu(cpus[i], req_mask);
gic = &gc->irq_contexts[i];
gic->handler = NULL;
gic->arg = NULL;
@@ -1269,17 +1253,16 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
irq = pci_irq_vector(pdev, i);
if (irq < 0) {
err = irq;
- goto free_mask;
+ goto free_irq;
}
err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
if (err)
- goto free_mask;
- irq_set_affinity_and_hint(irq, req_mask);
- cpumask_clear(req_mask);
+ goto free_irq;
+
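+		/* Spread IRQ affinity across CPUs local to the device's NUMA node */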
+ cpu = cpumask_local_spread(i, gc->numa_node);
+ irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}
- free_cpumask_var(req_mask);
- kfree(cpus);
err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
if (err)
@@ -1290,13 +1273,12 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
return 0;
-free_mask:
- free_cpumask_var(req_mask);
- kfree(cpus);
free_irq:
for (j = i - 1; j >= 0; j--) {
irq = pci_irq_vector(pdev, j);
gic = &gc->irq_contexts[j];
+
+ irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
@@ -1324,6 +1306,9 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
continue;
gic = &gc->irq_contexts[i];
+
+ /* Need to clear the hint before free_irq */
+ irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index c060b03f7e27..08acb7b89086 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -6,12 +6,16 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
+#include <linux/iopoll.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
-#define TABLE_UPDATE_SLEEP_US 10
-#define TABLE_UPDATE_TIMEOUT_US 100000
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+#define MEM_INIT_SLEEP_US 1000
+#define MEM_INIT_TIMEOUT_US 100000
+
#define OCELOT_RSV_VLAN_RANGE_START 4000
struct ocelot_mact_entry {
@@ -2713,6 +2717,46 @@ static void ocelot_detect_features(struct ocelot *ocelot)
ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
}
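+/* Polled via readx_poll_timeout() until the self-clearing MEM_INIT bit reads 0 */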
+static int ocelot_mem_init_status(struct ocelot *ocelot)
+{
+ unsigned int val;
+ int err;
+
+ err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+
+ return err ?: val;
+}
+
+int ocelot_reset(struct ocelot *ocelot)
+{
+ int err;
+ u32 val;
+
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ if (err)
+ return err;
+
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
+
+ /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be
+ * 100us) before enabling the switch core.
+ */
+ err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
+ MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
+ if (err)
+ return err;
+
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
+
+ return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+}
+EXPORT_SYMBOL(ocelot_reset);
+
int ocelot_init(struct ocelot *ocelot)
{
int i, ret;
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
index b8737efd2a85..d9ea75a14f2f 100644
--- a/drivers/net/ethernet/mscc/ocelot_devlink.c
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -487,6 +487,37 @@ static void ocelot_watermark_init(struct ocelot *ocelot)
ocelot_setup_sharing_watermarks(ocelot);
}
+/* Watermark encoding:
+ * Bit 8: unit (0: steps of 1, 1: steps of 16)
+ * Bits 7-0: value to be multiplied by the unit
+ */
+u16 ocelot_wm_enc(u16 value)
+{
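+	/* Values of 256 and above are encoded in units of 16 */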
+ WARN_ON(value >= 16 * BIT(8));
+
+ if (value >= BIT(8))
+ return BIT(8) | (value / 16);
+
+ return value;
+}
+EXPORT_SYMBOL(ocelot_wm_enc);
+
+u16 ocelot_wm_dec(u16 wm)
+{
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
+EXPORT_SYMBOL(ocelot_wm_dec);
+
+void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
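+	/* Bits 23-12 hold the current buffer use, bits 11-0 the maximum use seen */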
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
+EXPORT_SYMBOL(ocelot_wm_stat);
+
/* Pool size and type are fixed up at runtime. Keeping this structure to
* look up the cell size multipliers.
*/
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 7c0897e779dc..ee052404eb55 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -605,6 +605,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
flow_rule_match_control(rule, &match);
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ filter->key_type = OCELOT_VCAP_KEY_ANY;
+ filter->vlan.vid.value = match.key->vlan_id;
+ filter->vlan.vid.mask = match.mask->vlan_id;
+ filter->vlan.pcp.value[0] = match.key->vlan_priority;
+ filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ match_protocol = false;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -737,18 +749,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
match_protocol = false;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_match_vlan match;
-
- flow_rule_match_vlan(rule, &match);
- filter->key_type = OCELOT_VCAP_KEY_ANY;
- filter->vlan.vid.value = match.key->vlan_id;
- filter->vlan.vid.mask = match.mask->vlan_id;
- filter->vlan.pcp.value[0] = match.key->vlan_priority;
- filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
- match_protocol = false;
- }
-
finished_key_parsing:
if (match_protocol && proto != ETH_P_ALL) {
if (filter->block_id == VCAP_ES0) {
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 1a82f10c8853..2180ae94c744 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -335,8 +335,8 @@ static void
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@@ -355,8 +355,8 @@ static void
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index b097fd4a4061..7388c3b0535c 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -6,7 +6,6 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/interrupt.h>
-#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
@@ -17,6 +16,7 @@
#include <linux/skbuff.h>
#include <net/switchdev.h>
+#include <soc/mscc/ocelot.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_hsio.h>
#include <soc/mscc/vsc7514_regs.h>
@@ -26,80 +26,6 @@
#define VSC7514_VCAP_POLICER_BASE 128
#define VSC7514_VCAP_POLICER_MAX 191
-#define MEM_INIT_SLEEP_US 1000
-#define MEM_INIT_TIMEOUT_US 100000
-
-static const u32 *ocelot_regmap[TARGET_MAX] = {
- [ANA] = vsc7514_ana_regmap,
- [QS] = vsc7514_qs_regmap,
- [QSYS] = vsc7514_qsys_regmap,
- [REW] = vsc7514_rew_regmap,
- [SYS] = vsc7514_sys_regmap,
- [S0] = vsc7514_vcap_regmap,
- [S1] = vsc7514_vcap_regmap,
- [S2] = vsc7514_vcap_regmap,
- [PTP] = vsc7514_ptp_regmap,
- [DEV_GMII] = vsc7514_dev_gmii_regmap,
-};
-
-static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
- [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
- [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
- [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
- [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
- [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
- [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
- [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
- [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
- [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
- [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
- [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
- [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
- [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
- [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
- [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
- [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
- [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
- [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
- [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
- [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
- [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
- [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
- [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
- [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
- [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
- [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
- [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
- [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
- [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
- [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
- [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
- [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
- [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
- [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
- [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
- [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
- /* Replicated per number of ports (12), register size 4 per port */
- [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4),
- [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4),
- [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4),
- [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4),
- [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4),
- [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4),
- [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4),
- [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4),
- [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4),
- [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4),
- [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4),
- [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4),
- [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
-};
-
static void ocelot_pll5_init(struct ocelot *ocelot)
{
/* Configure PLL5. This will need a proper CCF driver
@@ -133,11 +59,11 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
{
int ret;
- ocelot->map = ocelot_regmap;
+ ocelot->map = vsc7514_regmap;
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
- ret = ocelot_regfields_init(ocelot, ocelot_regfields);
+ ret = ocelot_regfields_init(ocelot, vsc7514_regfields);
if (ret)
return ret;
@@ -190,73 +116,6 @@ static const struct of_device_id mscc_ocelot_match[] = {
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
-static int ocelot_mem_init_status(struct ocelot *ocelot)
-{
- unsigned int val;
- int err;
-
- err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
- &val);
-
- return err ?: val;
-}
-
-static int ocelot_reset(struct ocelot *ocelot)
-{
- int err;
- u32 val;
-
- err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
- if (err)
- return err;
-
- err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- if (err)
- return err;
-
- /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be
- * 100us) before enabling the switch core.
- */
- err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
- MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
- if (err)
- return err;
-
- err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- if (err)
- return err;
-
- return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
-}
-
-/* Watermark encode
- * Bit 8: Unit; 0:1, 1:16
- * Bit 7-0: Value to be multiplied with unit
- */
-static u16 ocelot_wm_enc(u16 value)
-{
- WARN_ON(value >= 16 * BIT(8));
-
- if (value >= BIT(8))
- return BIT(8) | (value / 16);
-
- return value;
-}
-
-static u16 ocelot_wm_dec(u16 wm)
-{
- if (wm & BIT(8))
- return (wm & GENMASK(7, 0)) * 16;
-
- return wm;
-}
-
-static void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
-{
- *inuse = (val & GENMASK(23, 12)) >> 12;
- *maxuse = val & GENMASK(11, 0);
-}
-
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
@@ -266,49 +125,6 @@ static const struct ocelot_ops ocelot_ops = {
.netdev_to_port = ocelot_netdev_to_port,
};
-static struct vcap_props vsc7514_vcap_props[] = {
- [VCAP_ES0] = {
- .action_type_width = 0,
- .action_table = {
- [ES0_ACTION_TYPE_NORMAL] = {
- .width = 73, /* HIT_STICKY not included */
- .count = 1,
- },
- },
- .target = S0,
- .keys = vsc7514_vcap_es0_keys,
- .actions = vsc7514_vcap_es0_actions,
- },
- [VCAP_IS1] = {
- .action_type_width = 0,
- .action_table = {
- [IS1_ACTION_TYPE_NORMAL] = {
- .width = 78, /* HIT_STICKY not included */
- .count = 4,
- },
- },
- .target = S1,
- .keys = vsc7514_vcap_is1_keys,
- .actions = vsc7514_vcap_is1_actions,
- },
- [VCAP_IS2] = {
- .action_type_width = 1,
- .action_table = {
- [IS2_ACTION_TYPE_NORMAL] = {
- .width = 49,
- .count = 2
- },
- [IS2_ACTION_TYPE_SMAC_SIP] = {
- .width = 6,
- .count = 4
- },
- },
- .target = S2,
- .keys = vsc7514_vcap_is2_keys,
- .actions = vsc7514_vcap_is2_actions,
- },
-};
-
static struct ptp_clock_info ocelot_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "ocelot ptp",
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index 9d2d3e13cacf..ef6fd3f6be30 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -9,7 +9,66 @@
#include <soc/mscc/vsc7514_regs.h>
#include "ocelot.h"
-const u32 vsc7514_ana_regmap[] = {
+const struct reg_field vsc7514_regfields[REGFIELD_MAX] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
+ [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
+ [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+ [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
+ [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
+ [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+ /* Replicated per number of ports (12), register size 4 per port */
+ [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4),
+ [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4),
+ [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4),
+ [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4),
+ [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
+};
+EXPORT_SYMBOL(vsc7514_regfields);
+
+static const u32 vsc7514_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
REG(ANA_VLANMASK, 0x009004),
REG(ANA_PORT_B_DOMAIN, 0x009008),
@@ -89,9 +148,8 @@ const u32 vsc7514_ana_regmap[] = {
REG(ANA_POL_HYST, 0x008bec),
REG(ANA_POL_MISC_CFG, 0x008bf0),
};
-EXPORT_SYMBOL(vsc7514_ana_regmap);
-const u32 vsc7514_qs_regmap[] = {
+static const u32 vsc7514_qs_regmap[] = {
REG(QS_XTR_GRP_CFG, 0x000000),
REG(QS_XTR_RD, 0x000008),
REG(QS_XTR_FRM_PRUNING, 0x000010),
@@ -105,9 +163,8 @@ const u32 vsc7514_qs_regmap[] = {
REG(QS_INJ_ERR, 0x000040),
REG(QS_INH_DBG, 0x000048),
};
-EXPORT_SYMBOL(vsc7514_qs_regmap);
-const u32 vsc7514_qsys_regmap[] = {
+static const u32 vsc7514_qsys_regmap[] = {
REG(QSYS_PORT_MODE, 0x011200),
REG(QSYS_SWITCH_PORT_MODE, 0x011234),
REG(QSYS_STAT_CNT_CFG, 0x011264),
@@ -150,9 +207,8 @@ const u32 vsc7514_qsys_regmap[] = {
REG(QSYS_SE_STATE, 0x00004c),
REG(QSYS_HSCH_MISC_CFG, 0x011388),
};
-EXPORT_SYMBOL(vsc7514_qsys_regmap);
-const u32 vsc7514_rew_regmap[] = {
+static const u32 vsc7514_rew_regmap[] = {
REG(REW_PORT_VLAN_CFG, 0x000000),
REG(REW_TAG_CFG, 0x000004),
REG(REW_PORT_CFG, 0x000008),
@@ -165,9 +221,8 @@ const u32 vsc7514_rew_regmap[] = {
REG(REW_STAT_CFG, 0x000890),
REG(REW_PPT, 0x000680),
};
-EXPORT_SYMBOL(vsc7514_rew_regmap);
-const u32 vsc7514_sys_regmap[] = {
+static const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
@@ -288,9 +343,8 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_PTP_NXT, 0x0006c0),
REG(SYS_PTP_CFG, 0x0006c4),
};
-EXPORT_SYMBOL(vsc7514_sys_regmap);
-const u32 vsc7514_vcap_regmap[] = {
+static const u32 vsc7514_vcap_regmap[] = {
/* VCAP_CORE_CFG */
REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
REG(VCAP_CORE_MV_CFG, 0x000004),
@@ -312,9 +366,8 @@ const u32 vsc7514_vcap_regmap[] = {
REG(VCAP_CONST_CORE_CNT, 0x0003b8),
REG(VCAP_CONST_IF_CNT, 0x0003bc),
};
-EXPORT_SYMBOL(vsc7514_vcap_regmap);
-const u32 vsc7514_ptp_regmap[] = {
+static const u32 vsc7514_ptp_regmap[] = {
REG(PTP_PIN_CFG, 0x000000),
REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
@@ -325,9 +378,8 @@ const u32 vsc7514_ptp_regmap[] = {
REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
};
-EXPORT_SYMBOL(vsc7514_ptp_regmap);
-const u32 vsc7514_dev_gmii_regmap[] = {
+static const u32 vsc7514_dev_gmii_regmap[] = {
REG(DEV_CLOCK_CFG, 0x0),
REG(DEV_PORT_MISC, 0x4),
REG(DEV_EVENTS, 0x8),
@@ -368,9 +420,22 @@ const u32 vsc7514_dev_gmii_regmap[] = {
REG(DEV_PCS_FX100_CFG, 0x94),
REG(DEV_PCS_FX100_STATUS, 0x98),
};
-EXPORT_SYMBOL(vsc7514_dev_gmii_regmap);
-const struct vcap_field vsc7514_vcap_es0_keys[] = {
+const u32 *vsc7514_regmap[TARGET_MAX] = {
+ [ANA] = vsc7514_ana_regmap,
+ [QS] = vsc7514_qs_regmap,
+ [QSYS] = vsc7514_qsys_regmap,
+ [REW] = vsc7514_rew_regmap,
+ [SYS] = vsc7514_sys_regmap,
+ [S0] = vsc7514_vcap_regmap,
+ [S1] = vsc7514_vcap_regmap,
+ [S2] = vsc7514_vcap_regmap,
+ [PTP] = vsc7514_ptp_regmap,
+ [DEV_GMII] = vsc7514_dev_gmii_regmap,
+};
+EXPORT_SYMBOL(vsc7514_regmap);
+
+static const struct vcap_field vsc7514_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 4 },
[VCAP_ES0_IGR_PORT] = { 4, 4 },
[VCAP_ES0_RSV] = { 8, 2 },
@@ -380,9 +445,8 @@ const struct vcap_field vsc7514_vcap_es0_keys[] = {
[VCAP_ES0_DP] = { 24, 1 },
[VCAP_ES0_PCP] = { 25, 3 },
};
-EXPORT_SYMBOL(vsc7514_vcap_es0_keys);
-const struct vcap_field vsc7514_vcap_es0_actions[] = {
+static const struct vcap_field vsc7514_vcap_es0_actions[] = {
[VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2 },
[VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1 },
[VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2 },
@@ -402,9 +466,8 @@ const struct vcap_field vsc7514_vcap_es0_actions[] = {
[VCAP_ES0_ACT_RSV] = { 49, 24 },
[VCAP_ES0_ACT_HIT_STICKY] = { 73, 1 },
};
-EXPORT_SYMBOL(vsc7514_vcap_es0_actions);
-const struct vcap_field vsc7514_vcap_is1_keys[] = {
+static const struct vcap_field vsc7514_vcap_is1_keys[] = {
[VCAP_IS1_HK_TYPE] = { 0, 1 },
[VCAP_IS1_HK_LOOKUP] = { 1, 2 },
[VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12 },
@@ -454,9 +517,8 @@ const struct vcap_field vsc7514_vcap_is1_keys[] = {
[VCAP_IS1_HK_IP4_L4_RNG] = { 148, 8 },
[VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = { 156, 32 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is1_keys);
-const struct vcap_field vsc7514_vcap_is1_actions[] = {
+static const struct vcap_field vsc7514_vcap_is1_actions[] = {
[VCAP_IS1_ACT_DSCP_ENA] = { 0, 1 },
[VCAP_IS1_ACT_DSCP_VAL] = { 1, 6 },
[VCAP_IS1_ACT_QOS_ENA] = { 7, 1 },
@@ -479,9 +541,8 @@ const struct vcap_field vsc7514_vcap_is1_actions[] = {
[VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4 },
[VCAP_IS1_ACT_HIT_STICKY] = { 78, 1 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is1_actions);
-const struct vcap_field vsc7514_vcap_is2_keys[] = {
+static const struct vcap_field vsc7514_vcap_is2_keys[] = {
/* Common: 46 bits */
[VCAP_IS2_TYPE] = { 0, 4 },
[VCAP_IS2_HK_FIRST] = { 4, 1 },
@@ -560,9 +621,8 @@ const struct vcap_field vsc7514_vcap_is2_keys[] = {
[VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = { 186, 1 },
[VCAP_IS2_HK_OAM_IS_Y1731] = { 187, 1 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is2_keys);
-const struct vcap_field vsc7514_vcap_is2_actions[] = {
+static const struct vcap_field vsc7514_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1 },
[VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1 },
[VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3 },
@@ -579,4 +639,47 @@ const struct vcap_field vsc7514_vcap_is2_actions[] = {
[VCAP_IS2_ACT_ACL_ID] = { 43, 6 },
[VCAP_IS2_ACT_HIT_CNT] = { 49, 32 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is2_actions);
+
+struct vcap_props vsc7514_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 73, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc7514_vcap_es0_keys,
+ .actions = vsc7514_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 78, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc7514_vcap_is1_keys,
+ .actions = vsc7514_vcap_is1_actions,
+ },
+ [VCAP_IS2] = {
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 49,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .target = S2,
+ .keys = vsc7514_vcap_is2_keys,
+ .actions = vsc7514_vcap_is2_actions,
+ },
+};
+EXPORT_SYMBOL(vsc7514_vcap_props);
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index c90d35f5ebca..808599b8066e 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -80,7 +80,7 @@ nfp-objs += \
abm/main.o
endif
-nfp-$(CONFIG_NFP_NET_IPSEC) += crypto/ipsec.o nfd3/ipsec.o
+nfp-$(CONFIG_NFP_NET_IPSEC) += crypto/ipsec.o nfd3/ipsec.o nfdk/ipsec.o
nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index b44263177981..b8bc89fc2540 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -10,6 +10,7 @@
#include <linux/ktime.h>
#include <net/xfrm.h>
+#include "../nfpcore/nfp_dev.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
#include "crypto.h"
@@ -330,6 +331,10 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
trunc_len = -1;
break;
case SADB_AALG_MD5HMAC:
+ if (nn->pdev->device == PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
+ return -EINVAL;
+ }
set_md5hmac(cfg, &trunc_len);
break;
case SADB_AALG_SHA1HMAC:
@@ -373,6 +378,10 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL;
break;
case SADB_EALG_3DESCBC:
+ if (nn->pdev->device == PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported encryption algorithm for offload");
+ return -EINVAL;
+ }
cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_3DES;
break;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index f693119541d5..d23830b5bcb8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1964,6 +1964,27 @@ int nfp_fl_ct_stats(struct flow_cls_offload *flow,
return 0;
}
+static bool
+nfp_fl_ct_offload_nft_supported(struct flow_cls_offload *flow)
+{
+ struct flow_rule *flow_rule = flow->rule;
+ struct flow_action *flow_action =
+ &flow_rule->action;
+ struct flow_action_entry *act;
+ int i;
+
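+	/* Only flows with a ct_metadata action and a state other than IP_CT_NEW are offloadable */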
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT_METADATA) {
+ enum ip_conntrack_info ctinfo =
+ act->ct_metadata.cookie & NFCT_INFOMASK;
+
+ return ctinfo != IP_CT_NEW;
+ }
+ }
+
+ return false;
+}
+
static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
{
@@ -1976,6 +1997,9 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
extack = flow->common.extack;
switch (flow->command) {
case FLOW_CLS_REPLACE:
+ if (!nfp_fl_ct_offload_nft_supported(flow))
+ return -EOPNOTSUPP;
+
/* Netfilter can request offload multiple times for the same
* flow - protect against adding duplicates.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index a8678d5612ee..060a77f2265d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -460,6 +460,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
sizeof(struct nfp_tun_neigh_v4);
unsigned long cookie = (unsigned long)neigh;
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_neigh_lag lag_info;
struct nfp_neigh_entry *nn_entry;
u32 port_id;
u8 mtype;
@@ -468,6 +469,11 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
if (!port_id)
return;
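+	/* Gather the LAG info up front, before priv->predt_lock is taken */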
+ if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT) {
+ memset(&lag_info, 0, sizeof(struct nfp_tun_neigh_lag));
+ nfp_flower_lag_get_info_from_netdev(app, netdev, &lag_info);
+ }
+
spin_lock_bh(&priv->predt_lock);
nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
neigh_table_params);
@@ -515,7 +521,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
neigh_ha_snapshot(common->dst_addr, neigh, netdev);
if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT)
- nfp_flower_lag_get_info_from_netdev(app, netdev, lag);
+ memcpy(lag, &lag_info, sizeof(struct nfp_tun_neigh_lag));
common->port_id = cpu_to_be32(port_id);
if (rhashtable_insert_fast(&priv->neigh_table,
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 861082c5dbff..59fb0583cc08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -192,10 +192,10 @@ static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb,
return 0;
md_bytes = sizeof(meta_id) +
- !!md_dst * NFP_NET_META_PORTID_SIZE +
- !!tls_handle * NFP_NET_META_CONN_HANDLE_SIZE +
- vlan_insert * NFP_NET_META_VLAN_SIZE +
- *ipsec * NFP_NET_META_IPSEC_FIELD_SIZE; /* IPsec has 12 bytes of metadata */
+ (!!md_dst ? NFP_NET_META_PORTID_SIZE : 0) +
+ (!!tls_handle ? NFP_NET_META_CONN_HANDLE_SIZE : 0) +
+ (vlan_insert ? NFP_NET_META_VLAN_SIZE : 0) +
+ (*ipsec ? NFP_NET_META_IPSEC_FIELD_SIZE : 0);
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
@@ -226,9 +226,6 @@ static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb,
meta_id |= NFP_NET_META_VLAN;
}
if (*ipsec) {
- /* IPsec has three consecutive 4-bit IPsec metadata types,
- * so in total IPsec has three 4 bytes of metadata.
- */
data -= NFP_NET_META_IPSEC_SIZE;
put_unaligned_be32(offload_info.seq_hi, data);
data -= NFP_NET_META_IPSEC_SIZE;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index ccacb6ab6c39..d60c0e991a91 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -6,6 +6,7 @@
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/bitfield.h>
+#include <net/xfrm.h>
#include "../nfp_app.h"
#include "../nfp_net.h"
@@ -172,25 +173,32 @@ close_block:
static int
nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool *ipsec)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct nfp_ipsec_offload offload_info;
unsigned char *data;
bool vlan_insert;
u32 meta_id = 0;
int md_bytes;
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (xfrm_offload(skb))
+ *ipsec = nfp_net_ipsec_tx_prep(dp, skb, &offload_info);
+#endif
+
if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
md_dst = NULL;
vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
- if (!(md_dst || vlan_insert))
+ if (!(md_dst || vlan_insert || *ipsec))
return 0;
md_bytes = sizeof(meta_id) +
- !!md_dst * NFP_NET_META_PORTID_SIZE +
- vlan_insert * NFP_NET_META_VLAN_SIZE;
+ (!!md_dst ? NFP_NET_META_PORTID_SIZE : 0) +
+ (vlan_insert ? NFP_NET_META_VLAN_SIZE : 0) +
+ (*ipsec ? NFP_NET_META_IPSEC_FIELD_SIZE : 0);
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
@@ -212,6 +220,17 @@ nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
meta_id |= NFP_NET_META_VLAN;
}
+ if (*ipsec) {
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_hi, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_low, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.handle - 1, data);
+ meta_id <<= NFP_NET_META_IPSEC_FIELD_SIZE;
+ meta_id |= NFP_NET_META_IPSEC << 8 | NFP_NET_META_IPSEC << 4 | NFP_NET_META_IPSEC;
+ }
+
meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
FIELD_PREP(NFDK_META_FIELDS, meta_id);
@@ -243,6 +262,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
int nr_frags, wr_idx;
dma_addr_t dma_addr;
+ bool ipsec = false;
u64 metadata;
dp = &nn->dp;
@@ -263,7 +283,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb);
+ metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb, &ipsec);
if (unlikely((int)metadata < 0))
goto err_flush;
@@ -361,6 +381,9 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+ if (ipsec)
+ metadata = nfp_nfdk_ipsec_tx(metadata, skb);
+
if (!skb_is_gso(skb)) {
real_len = skb->len;
/* Metadata desc */
@@ -760,6 +783,15 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
return false;
data += sizeof(struct nfp_net_tls_resync_req);
break;
+#ifdef CONFIG_NFP_NET_IPSEC
+ case NFP_NET_META_IPSEC:
+		/* Note: an IPsec packet can have a zero saidx, so add 1
+		 * here so the driver can tell that the packet is an
+		 * IPsec packet.
+		 */
+ meta->ipsec_saidx = get_unaligned_be32(data) + 1;
+ data += 4;
+ break;
+#endif
default:
return true;
}
@@ -1186,6 +1218,13 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (meta.ipsec_saidx != 0 && unlikely(nfp_net_ipsec_rx(&meta, skb))) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+#endif
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
new file mode 100644
index 000000000000..58d8f59eb885
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2023 Corigine, Inc */
+
+#include <net/xfrm.h>
+
+#include "../nfp_net.h"
+#include "nfdk.h"
+
+u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+
+ if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM))
+ flags |= NFDK_DESC_TX_L3_CSUM | NFDK_DESC_TX_L4_CSUM;
+
+ return flags;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
index 0ea51d9f2325..fe55980348e9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
@@ -125,4 +125,12 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
void nfp_nfdk_ctrl_poll(struct tasklet_struct *t);
void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
+#ifndef CONFIG_NFP_NET_IPSEC
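+/* Stub keeps callers free of #ifdefs when IPsec support is compiled out */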
+static inline u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
+{
+ return flags;
+}
+#else
+u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb);
+#endif
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index e9d228d7a95d..918319f965b3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -293,35 +293,131 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
}
}
-static const u16 nfp_eth_media_table[] = {
- [NFP_MEDIA_1000BASE_CX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- [NFP_MEDIA_1000BASE_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- [NFP_MEDIA_10GBASE_KX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- [NFP_MEDIA_10GBASE_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- [NFP_MEDIA_10GBASE_CX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- [NFP_MEDIA_10GBASE_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- [NFP_MEDIA_10GBASE_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- [NFP_MEDIA_10GBASE_ER] = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
- [NFP_MEDIA_25GBASE_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- [NFP_MEDIA_25GBASE_KR_S] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- [NFP_MEDIA_25GBASE_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- [NFP_MEDIA_25GBASE_CR_S] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- [NFP_MEDIA_25GBASE_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- [NFP_MEDIA_40GBASE_CR4] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- [NFP_MEDIA_40GBASE_KR4] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- [NFP_MEDIA_40GBASE_SR4] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- [NFP_MEDIA_40GBASE_LR4] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- [NFP_MEDIA_50GBASE_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
- [NFP_MEDIA_50GBASE_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
- [NFP_MEDIA_50GBASE_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
- [NFP_MEDIA_50GBASE_LR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_50GBASE_ER] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_50GBASE_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_100GBASE_KR4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- [NFP_MEDIA_100GBASE_SR4] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- [NFP_MEDIA_100GBASE_CR4] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- [NFP_MEDIA_100GBASE_KP4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- [NFP_MEDIA_100GBASE_CR10] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+static const struct nfp_eth_media_link_mode {
+ u16 ethtool_link_mode;
+ u16 speed;
+} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
+ [NFP_MEDIA_1000BASE_CX] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = NFP_SPEED_1G,
+ },
+ [NFP_MEDIA_1000BASE_KX] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = NFP_SPEED_1G,
+ },
+ [NFP_MEDIA_10GBASE_KX4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_CX4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_25GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_KR_S] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_CR_S] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_40GBASE_CR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_KR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_SR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_LR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_50GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_FR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_100GBASE_KR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_SR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_CR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_KP4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_CR10] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+};
+
+static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
+ [NFP_SPEED_1G] = SPEED_1000,
+ [NFP_SPEED_10G] = SPEED_10000,
+ [NFP_SPEED_25G] = SPEED_25000,
+ [NFP_SPEED_40G] = SPEED_40000,
+ [NFP_SPEED_50G] = SPEED_50000,
+ [NFP_SPEED_100G] = SPEED_100000,
};
static void nfp_add_media_link_mode(struct nfp_port *port,
@@ -334,8 +430,12 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
};
struct nfp_cpp *cpp = port->app->cpp;
- if (nfp_eth_read_media(cpp, &ethm))
+ if (nfp_eth_read_media(cpp, &ethm)) {
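+ /* media info unavailable: conservatively assume every speed is supported */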
+ bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
return;
+ }
+
+ bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
for (u32 i = 0; i < 2; i++) {
supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
@@ -344,20 +444,26 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
if (i < 64) {
- if (supported_modes[0] & BIT_ULL(i))
- __set_bit(nfp_eth_media_table[i],
+ if (supported_modes[0] & BIT_ULL(i)) {
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.supported);
+ __set_bit(nfp_eth_media_table[i].speed,
+ port->speed_bitmap);
+ }
if (advertised_modes[0] & BIT_ULL(i))
- __set_bit(nfp_eth_media_table[i],
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.advertising);
} else {
- if (supported_modes[1] & BIT_ULL(i - 64))
- __set_bit(nfp_eth_media_table[i],
+ if (supported_modes[1] & BIT_ULL(i - 64)) {
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.supported);
+ __set_bit(nfp_eth_media_table[i].speed,
+ port->speed_bitmap);
+ }
if (advertised_modes[1] & BIT_ULL(i - 64))
- __set_bit(nfp_eth_media_table[i],
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.advertising);
}
}
@@ -468,6 +574,22 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed != SPEED_UNKNOWN) {
u32 speed = cmd->base.speed / eth_port->lanes;
+ bool is_supported = false;
+
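+ /* the requested speed must appear in the port's supported-speed bitmap */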
+ for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
+ if (cmd->base.speed == nfp_eth_speed_map[i] &&
+ test_bit(i, port->speed_bitmap)) {
+ is_supported = true;
+ break;
+ }
+ }
+
+ if (!is_supported) {
+ netdev_err(netdev, "Speed %u is not supported.\n",
+ cmd->base.speed);
+ err = -EINVAL;
+ goto err_bad_set;
+ }
if (req_aneg) {
netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
@@ -1908,12 +2030,12 @@ nfp_net_get_eeprom(struct net_device *netdev,
struct nfp_app *app = nfp_app_from_netdev(netdev);
u8 buf[NFP_EEPROM_LEN] = {};
- if (eeprom->len == 0)
- return -EINVAL;
-
if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
return -EOPNOTSUPP;
+ if (eeprom->len == 0)
+ return -EINVAL;
+
eeprom->magic = app->pdev->vendor | (app->pdev->device << 16);
memcpy(bytes, buf + eeprom->offset, eeprom->len);
@@ -1927,15 +2049,15 @@ nfp_net_set_eeprom(struct net_device *netdev,
struct nfp_app *app = nfp_app_from_netdev(netdev);
u8 buf[NFP_EEPROM_LEN] = {};
+ if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
if (eeprom->len == 0)
return -EINVAL;
if (eeprom->magic != (app->pdev->vendor | app->pdev->device << 16))
return -EINVAL;
- if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
- return -EOPNOTSUPP;
-
memcpy(buf + eeprom->offset, bytes, eeprom->len);
if (nfp_net_set_port_mac_by_hwinfo(netdev, buf))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index f8cd157ca1d7..9c04f9f0e2c9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -38,6 +38,16 @@ enum nfp_port_flags {
NFP_PORT_CHANGED = 0,
};
+enum {
+ NFP_SPEED_1G,
+ NFP_SPEED_10G,
+ NFP_SPEED_25G,
+ NFP_SPEED_40G,
+ NFP_SPEED_50G,
+ NFP_SPEED_100G,
+ NFP_SUP_SPEED_NUMBER
+};
+
/**
* struct nfp_port - structure representing NFP port
* @netdev: backpointer to associated netdev
@@ -52,6 +62,7 @@ enum nfp_port_flags {
* @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
* @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
* @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available
+ * @speed_bitmap: for %NFP_PORT_PHYS_PORT supported speed bitmap
* @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
* @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
* @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
@@ -78,6 +89,7 @@ struct nfp_port {
bool eth_forced;
struct nfp_eth_table_port *eth_port;
u8 __iomem *eth_stats;
+ DECLARE_BITMAP(speed_bitmap, NFP_SUP_SPEED_NUMBER);
};
/* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
struct {
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c
index f78c2447d45b..9dd5afe37f6e 100644
--- a/drivers/net/ethernet/netronome/nfp/nic/main.c
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.c
@@ -32,9 +32,12 @@ static void nfp_nic_sriov_disable(struct nfp_app *app)
static int nfp_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
- nfp_nic_dcb_init(nn);
+ return nfp_nic_dcb_init(nn);
+}
- return 0;
+static void nfp_nic_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
+{
+ nfp_nic_dcb_clean(nn);
}
static int nfp_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
@@ -72,4 +75,5 @@ const struct nfp_app_type app_nic = {
.sriov_disable = nfp_nic_sriov_disable,
.vnic_init = nfp_nic_vnic_init,
+ .vnic_clean = nfp_nic_vnic_clean,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.h b/drivers/net/ethernet/netronome/nfp/nic/main.h
index 7ba04451b8ba..094374df42b8 100644
--- a/drivers/net/ethernet/netronome/nfp/nic/main.h
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.h
@@ -33,7 +33,7 @@ struct nfp_dcb {
int nfp_nic_dcb_init(struct nfp_net *nn);
void nfp_nic_dcb_clean(struct nfp_net *nn);
#else
-static inline int nfp_nic_dcb_init(struct nfp_net *nn) {return 0; }
+static inline int nfp_nic_dcb_init(struct nfp_net *nn) { return 0; }
static inline void nfp_nic_dcb_clean(struct nfp_net *nn) {}
#endif
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 626b9113e7c4..d911f4fd9af6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -708,9 +708,16 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
q->lif->index, q->name, q->hw_type, q->hw_index,
q->head_idx, ring_doorbell);
- if (ring_doorbell)
+ if (ring_doorbell) {
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
+
+ q->dbell_jiffies = jiffies;
+
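+ /* arm the napi deadline timer so the queue is serviced even if an event is missed */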
+ if (q_to_qcq(q)->napi_qcq)
+ mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
+ }
}
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 2a1d7b9c07e7..bce3ca38669b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -25,6 +25,12 @@
#define IONIC_DEV_INFO_REG_COUNT 32
#define IONIC_DEV_CMD_REG_COUNT 32
+#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */
+#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */
+#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
+#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
+#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */
+
struct ionic_dev_bar {
void __iomem *vaddr;
phys_addr_t bus_addr;
@@ -216,6 +222,8 @@ struct ionic_queue {
struct ionic_lif *lif;
struct ionic_desc_info *info;
u64 dbval;
+ unsigned long dbell_deadline;
+ unsigned long dbell_jiffies;
u16 head_idx;
u16 tail_idx;
unsigned int index;
@@ -361,4 +369,8 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
+bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
+bool ionic_txq_poke_doorbell(struct ionic_queue *q);
+bool ionic_rxq_poke_doorbell(struct ionic_queue *q);
+
#endif /* _IONIC_DEV_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 4dd16c487f2b..63a78a9ac241 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -16,6 +16,7 @@
#include "ionic.h"
#include "ionic_bus.h"
+#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
@@ -200,6 +201,13 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
}
}
+static void ionic_napi_deadline(struct timer_list *timer)
+{
+ struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
+
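+ /* the deadline passed with no interrupt: run napi to service the queue */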
+ napi_schedule(&qcq->napi);
+}
+
static irqreturn_t ionic_isr(int irq, void *data)
{
struct napi_struct *napi = data;
@@ -269,6 +277,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
.oper = IONIC_Q_ENABLE,
},
};
+ int ret;
idev = &lif->ionic->idev;
dev = lif->ionic->dev;
@@ -276,16 +285,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ if (qcq->flags & IONIC_QCQ_F_INTR)
+ ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
+
+ ret = ionic_adminq_post_wait(lif, &ctx);
+ if (ret)
+ return ret;
+
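+ /* with the queue now enabled in FW, it is safe to run napi and unmask the irq */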
+ if (qcq->napi.poll)
+ napi_enable(&qcq->napi);
+
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector,
&qcq->intr.affinity_mask);
- napi_enable(&qcq->napi);
- ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
- return ionic_adminq_post_wait(lif, &ctx);
+ return 0;
}
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
@@ -316,6 +333,7 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
synchronize_irq(qcq->intr.vector);
irq_set_affinity_hint(qcq->intr.vector, NULL);
napi_disable(&qcq->napi);
+ del_timer_sync(&qcq->napi_deadline);
}
/* If there was a previous fw communication error, don't bother with
@@ -451,6 +469,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
n_qcq->intr.vector = src_qcq->intr.vector;
n_qcq->intr.index = src_qcq->intr.index;
+ n_qcq->napi_qcq = src_qcq->napi_qcq;
}
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -564,13 +583,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
}
if (flags & IONIC_QCQ_F_NOTIFYQ) {
- int q_size, cq_size;
+ int q_size;
- /* q & cq need to be contiguous in case of notifyq */
+ /* q & cq need to be contiguous in NotifyQ, so alloc it all in q
+ * and don't alloc qc. We leave new->qc_size and new->qc_base
+ * as 0 to be sure we don't try to free it later.
+ */
q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
- cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
-
- new->q_size = PAGE_SIZE + q_size + cq_size;
+ new->q_size = PAGE_SIZE + q_size +
+ ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
new->q_base = dma_alloc_coherent(dev, new->q_size,
&new->q_base_pa, GFP_KERNEL);
if (!new->q_base) {
@@ -773,8 +794,14 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -828,11 +855,17 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
+ q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+
qcq->flags |= IONIC_QCQ_F_INITED;
return 0;
@@ -1150,6 +1183,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev = &lif->ionic->idev;
unsigned long irqflags;
unsigned int flags = 0;
+ bool resched = false;
int rx_work = 0;
int tx_work = 0;
int n_work = 0;
@@ -1187,6 +1221,16 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
}
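+ /* queues that saw no completions may still have work posted: poke their doorbells */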
+ if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
+ resched = true;
+ if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
+ resched = true;
+ if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
+ resched = true;
+ if (resched)
+ mod_timer(&lif->adminqcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
@@ -3245,8 +3289,14 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
+ q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index a53984bf3544..734519895614 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -74,8 +74,10 @@ struct ionic_qcq {
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
+ struct timer_list napi_deadline;
struct napi_struct napi;
unsigned int flags;
+ struct ionic_qcq *napi_qcq;
struct dentry *dentry;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index a13530ec4dd8..08c42b039d92 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -289,6 +289,35 @@ static void ionic_adminq_cb(struct ionic_queue *q,
complete_all(&ctx->work);
}
+bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
+{
+ struct ionic_lif *lif = q->lif;
+ unsigned long now, then, dif;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&lif->adminq_lock, irqflags);
+
+ if (q->tail_idx == q->head_idx) {
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+ return false;
+ }
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
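+ /* only ring again if the doorbell has been idle past its deadline */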
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+ }
+
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+
+ return true;
+}
+
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
struct ionic_desc_info *desc_info;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 0c3977416cd1..f761780f0162 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -22,6 +22,67 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
+bool ionic_txq_poke_doorbell(struct ionic_queue *q)
+{
+ unsigned long now, then, dif;
+ struct netdev_queue *netdev_txq;
+ struct net_device *netdev;
+
+ netdev = q->lif->netdev;
+ netdev_txq = netdev_get_tx_queue(netdev, q->index);
+
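+ /* take the netdev tx queue lock to serialize with the transmit path */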
+ HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());
+
+ if (q->tail_idx == q->head_idx) {
+ HARD_TX_UNLOCK(netdev, netdev_txq);
+ return false;
+ }
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+ }
+
+ HARD_TX_UNLOCK(netdev, netdev_txq);
+
+ return true;
+}
+
+bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
+{
+ unsigned long now, then, dif;
+
+ /* no lock needed: only the rx napi or txrx napi context fills this queue */
+
+ if (q->tail_idx == q->head_idx)
+ return false;
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+
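+ /* exponential backoff: double the deadline, capped at the maximum */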
+ dif = 2 * q->dbell_deadline;
+ if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
+ dif = IONIC_RX_MAX_DOORBELL_DEADLINE;
+
+ q->dbell_deadline = dif;
+ }
+
+ return true;
+}
+
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
return netdev_get_tx_queue(q->lif->netdev, q->index);
@@ -424,6 +485,12 @@ void ionic_rx_fill(struct ionic_queue *q)
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
+
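+ /* buffers were posted: reset the doorbell backoff to the minimum deadline */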
+ q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
+ mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
}
void ionic_rx_empty(struct ionic_queue *q)
@@ -511,6 +578,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
+ if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
+ mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
@@ -544,23 +614,29 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
+ if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
+ mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
- struct ionic_qcq *qcq = napi_to_qcq(napi);
+ struct ionic_qcq *rxqcq = napi_to_qcq(napi);
struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index;
+ struct ionic_qcq *txqcq;
struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
+ bool resched = false;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
+ txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
@@ -572,7 +648,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
- ionic_dim_update(qcq, 0);
+ ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
rxcq->bound_intr->rearm_count++;
}
@@ -583,6 +659,13 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
+ if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
+ resched = true;
+ if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
+ resched = true;
+ if (resched)
+ mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return rx_work_done;
}
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 14fc0af304ce..50b61a0a7f53 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -16,7 +16,6 @@
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
@@ -1071,33 +1070,25 @@ out:
return port;
}
-/* Call of_node_put(mdio) after done */
-static struct device_node *rswitch_get_mdio_node(struct rswitch_device *rdev)
-{
- struct device_node *port, *mdio;
-
- port = rswitch_get_port_node(rdev);
- if (!port)
- return NULL;
-
- mdio = of_get_child_by_name(port, "mdio");
- of_node_put(port);
-
- return mdio;
-}
-
static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
- struct device_node *port;
+ u32 max_speed;
int err;
- port = rswitch_get_port_node(rdev);
- if (!port)
+ if (!rdev->np_port)
return 0; /* ignored */
- err = of_get_phy_mode(port, &rdev->etha->phy_interface);
- of_node_put(port);
+ err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
+ if (err)
+ return err;
+ err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
+ if (!err) {
+ rdev->etha->speed = max_speed;
+ return 0;
+ }
+
+ /* if no "max-speed" property, let's use default speed */
switch (rdev->etha->phy_interface) {
case PHY_INTERFACE_MODE_MII:
rdev->etha->speed = SPEED_100;
@@ -1109,11 +1100,10 @@ static int rswitch_etha_get_params(struct rswitch_device *rdev)
rdev->etha->speed = SPEED_2500;
break;
default:
- err = -EINVAL;
- break;
+ return -EINVAL;
}
- return err;
+ return 0;
}
static int rswitch_mii_register(struct rswitch_device *rdev)
@@ -1133,7 +1123,7 @@ static int rswitch_mii_register(struct rswitch_device *rdev)
mii_bus->write_c45 = rswitch_etha_mii_write_c45;
mii_bus->parent = &rdev->priv->pdev->dev;
- mdio_np = rswitch_get_mdio_node(rdev);
+ mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
err = of_mdiobus_register(mii_bus, mdio_np);
if (err < 0) {
mdiobus_free(mii_bus);
@@ -1157,114 +1147,107 @@ static void rswitch_mii_unregister(struct rswitch_device *rdev)
}
}
-static void rswitch_mac_config(struct phylink_config *config,
- unsigned int mode,
- const struct phylink_link_state *state)
+static void rswitch_adjust_link(struct net_device *ndev)
{
-}
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
-static void rswitch_mac_link_down(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
+ /* Current hardware cannot change the link speed at runtime */
+ if (phydev->link != rdev->etha->link) {
+ phy_print_status(phydev);
+ if (phydev->link)
+ phy_power_on(rdev->serdes);
+ else
+ phy_power_off(rdev->serdes);
+
+ rdev->etha->link = phydev->link;
+ }
}
-static void rswitch_mac_link_up(struct phylink_config *config,
- struct phy_device *phydev, unsigned int mode,
- phy_interface_t interface, int speed,
- int duplex, bool tx_pause, bool rx_pause)
+static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
+ struct phy_device *phydev)
{
- /* Current hardware cannot change speed at runtime */
-}
+ /* Current hardware cannot change the link speed at runtime */
+ switch (rdev->etha->speed) {
+ case SPEED_2500:
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ break;
+ case SPEED_1000:
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ break;
+ case SPEED_100:
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ break;
+ default:
+ break;
+ }
-static const struct phylink_mac_ops rswitch_phylink_ops = {
- .mac_config = rswitch_mac_config,
- .mac_link_down = rswitch_mac_link_down,
- .mac_link_up = rswitch_mac_link_up,
-};
+ phy_set_max_speed(phydev, rdev->etha->speed);
+}
-static int rswitch_phylink_init(struct rswitch_device *rdev)
+static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
- struct device_node *port;
- struct phylink *phylink;
- int err;
+ struct phy_device *phydev;
+ struct device_node *phy;
+ int err = -ENOENT;
- port = rswitch_get_port_node(rdev);
- if (!port)
+ if (!rdev->np_port)
return -ENODEV;
- rdev->phylink_config.dev = &rdev->ndev->dev;
- rdev->phylink_config.type = PHYLINK_NETDEV;
- __set_bit(PHY_INTERFACE_MODE_SGMII, rdev->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_USXGMII, rdev->phylink_config.supported_interfaces);
- rdev->phylink_config.mac_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;
+ phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
+ if (!phy)
+ return -ENODEV;
- phylink = phylink_create(&rdev->phylink_config, &port->fwnode,
- rdev->etha->phy_interface, &rswitch_phylink_ops);
- if (IS_ERR(phylink)) {
- err = PTR_ERR(phylink);
+ /* Set phydev->host_interfaces before calling of_phy_connect() so that
+ * the PHY is configured with the host interface information.
+ */
+ phydev = of_phy_find_device(phy);
+ if (!phydev)
goto out;
- }
+ __set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
- rdev->phylink = phylink;
- err = phylink_of_phy_connect(rdev->phylink, port, rdev->etha->phy_interface);
+ phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
+ rdev->etha->phy_interface);
+ if (!phydev)
+ goto out;
+
+ phy_set_max_speed(phydev, SPEED_2500);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ rswitch_phy_remove_link_mode(rdev, phydev);
+
+ phy_attached_info(phydev);
+
+ err = 0;
out:
- of_node_put(port);
+ of_node_put(phy);
return err;
}
-static void rswitch_phylink_deinit(struct rswitch_device *rdev)
+static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
- rtnl_lock();
- phylink_disconnect_phy(rdev->phylink);
- rtnl_unlock();
- phylink_destroy(rdev->phylink);
+ if (rdev->ndev->phydev) {
+ phy_disconnect(rdev->ndev->phydev);
+ rdev->ndev->phydev = NULL;
+ }
}
static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
- struct device_node *port = rswitch_get_port_node(rdev);
- struct phy *serdes;
int err;
- serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
- of_node_put(port);
- if (IS_ERR(serdes))
- return PTR_ERR(serdes);
-
- err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET,
+ err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
rdev->etha->phy_interface);
if (err < 0)
return err;
- return phy_set_speed(serdes, rdev->etha->speed);
-}
-
-static int rswitch_serdes_init(struct rswitch_device *rdev)
-{
- struct device_node *port = rswitch_get_port_node(rdev);
- struct phy *serdes;
-
- serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
- of_node_put(port);
- if (IS_ERR(serdes))
- return PTR_ERR(serdes);
-
- return phy_init(serdes);
-}
-
-static int rswitch_serdes_deinit(struct rswitch_device *rdev)
-{
- struct device_node *port = rswitch_get_port_node(rdev);
- struct phy *serdes;
-
- serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
- of_node_put(port);
- if (IS_ERR(serdes))
- return PTR_ERR(serdes);
-
- return phy_exit(serdes);
+ return phy_set_speed(rdev->serdes, rdev->etha->speed);
}
static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
@@ -1282,9 +1265,15 @@ static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
if (err < 0)
return err;
- err = rswitch_phylink_init(rdev);
+ err = rswitch_phy_device_init(rdev);
if (err < 0)
- goto err_phylink_init;
+ goto err_phy_device_init;
+
+ rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
+ if (IS_ERR(rdev->serdes)) {
+ err = PTR_ERR(rdev->serdes);
+ goto err_serdes_phy_get;
+ }
err = rswitch_serdes_set_params(rdev);
if (err < 0)
@@ -1293,9 +1282,10 @@ static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
return 0;
err_serdes_set_params:
- rswitch_phylink_deinit(rdev);
+err_serdes_phy_get:
+ rswitch_phy_device_deinit(rdev);
-err_phylink_init:
+err_phy_device_init:
rswitch_mii_unregister(rdev);
return err;
@@ -1303,7 +1293,7 @@ err_phylink_init:
static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
- rswitch_phylink_deinit(rdev);
+ rswitch_phy_device_deinit(rdev);
rswitch_mii_unregister(rdev);
}
@@ -1318,7 +1308,7 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
}
rswitch_for_each_enabled_port(priv, i) {
- err = rswitch_serdes_init(priv->rdev[i]);
+ err = phy_init(priv->rdev[i]->serdes);
if (err)
goto err_serdes;
}
@@ -1327,7 +1317,7 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
err_serdes:
rswitch_for_each_enabled_port_continue_reverse(priv, i)
- rswitch_serdes_deinit(priv->rdev[i]);
+ phy_exit(priv->rdev[i]->serdes);
i = RSWITCH_NUM_PORTS;
err_init_one:
@@ -1342,7 +1332,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
int i;
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
- rswitch_serdes_deinit(priv->rdev[i]);
+ phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(priv->rdev[i]);
}
}
@@ -1351,7 +1341,7 @@ static int rswitch_open(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- phylink_start(rdev->phylink);
+ phy_start(ndev->phydev);
napi_enable(&rdev->napi);
netif_start_queue(ndev);
@@ -1371,7 +1361,7 @@ static int rswitch_stop(struct net_device *ndev)
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
- phylink_stop(rdev->phylink);
+ phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
return 0;
@@ -1499,8 +1489,6 @@ static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
- struct rswitch_device *rdev = netdev_priv(ndev);
-
if (!netif_running(ndev))
return -EINVAL;
@@ -1510,7 +1498,7 @@ static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd
case SIOCSHWTSTAMP:
return rswitch_hwstamp_set(ndev, req);
default:
- return phylink_mii_ioctl(rdev->phylink, req, cmd);
+ return phy_mii_ioctl(ndev->phydev, req, cmd);
}
}
@@ -1565,7 +1553,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
struct platform_device *pdev = priv->pdev;
struct rswitch_device *rdev;
- struct device_node *port;
struct net_device *ndev;
int err;
@@ -1594,10 +1581,10 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
netif_napi_add(ndev, &rdev->napi, rswitch_poll);
- port = rswitch_get_port_node(rdev);
- rdev->disabled = !port;
- err = of_get_ethdev_address(port, ndev);
- of_node_put(port);
+ rdev->np_port = rswitch_get_port_node(rdev);
+ rdev->disabled = !rdev->np_port;
+ err = of_get_ethdev_address(rdev->np_port, ndev);
+ of_node_put(rdev->np_port);
if (err) {
if (is_valid_ether_addr(rdev->etha->mac_addr))
eth_hw_addr_set(ndev, rdev->etha->mac_addr);
@@ -1798,7 +1785,7 @@ static void rswitch_deinit(struct rswitch_private *priv)
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
struct rswitch_device *rdev = priv->rdev[i];
- rswitch_serdes_deinit(rdev);
+ phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(rdev);
unregister_netdev(rdev->ndev);
rswitch_device_free(priv, i);
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 49efb0f31c77..59830ab91a69 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -943,8 +943,6 @@ struct rswitch_device {
struct rswitch_private *priv;
struct net_device *ndev;
struct napi_struct napi;
- struct phylink *phylink;
- struct phylink_config phylink_config;
void __iomem *addr;
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
@@ -953,6 +951,8 @@ struct rswitch_device {
int port;
struct rswitch_etha *etha;
+ struct device_node *np_port;
+ struct phy *serdes;
};
struct rswitch_mfwd_mac_table_entry {
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 9569c3356b4a..02c2adeb0a12 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1003,8 +1003,11 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
/* Determine netdevice features */
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
- if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
+ if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) {
net_dev->features |= NETIF_F_TSO6;
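+ /* encapsulated TSO6 additionally requires the TX_TSO_V2_ENCAP capability */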
+ if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
+ net_dev->hw_enc_features |= NETIF_F_TSO6;
+ }
/* Check whether device supports TSO */
if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
net_dev->features &= ~NETIF_F_ALL_TSO;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 835caa15d55f..732774645c1a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -560,6 +560,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->has_gmac4 = 1;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
+ plat_dat->rx_clk_runs_in_lpi = 1;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 592b4067f9b8..16a7421715cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -567,6 +567,7 @@ struct tc_cbs_qopt_offload;
struct flow_cls_offload;
struct tc_taprio_qopt_offload;
struct tc_etf_qopt_offload;
+struct tc_query_caps_base;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
@@ -580,6 +581,8 @@ struct stmmac_tc_ops {
struct tc_taprio_qopt_offload *qopt);
int (*setup_etf)(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt);
+ int (*query_caps)(struct stmmac_priv *priv,
+ struct tc_query_caps_base *base);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -594,6 +597,8 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_taprio, __args)
#define stmmac_tc_setup_etf(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_etf, __args)
+#define stmmac_tc_query_caps(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, query_caps, __args)
struct stmmac_counters;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 734d84263fd2..868f59ec8439 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1080,7 +1080,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
- priv->eee_active = phy_init_eee(phy, 1) >= 0;
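+ /* allow the PHY to stop its clock in LPI only when the rx clock is not required to keep running */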
+ priv->eee_active =
+ phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
priv->tx_lpi_enabled = priv->eee_enabled;
stmmac_set_eee_pls(priv, priv->hw, true);
@@ -5991,6 +5992,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
struct stmmac_priv *priv = netdev_priv(ndev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return stmmac_tc_query_caps(priv, priv, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&stmmac_block_cb_list,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 2cfb18cef1d4..9d55226479b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1107,6 +1107,25 @@ static int tc_setup_etf(struct stmmac_priv *priv,
return 0;
}
+static int tc_query_caps(struct stmmac_priv *priv,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
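+ /* taprio offload requires the EST feature to be present in hardware */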
+ if (!priv->dma_cap.estsel)
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
@@ -1114,4 +1133,5 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.setup_cls = tc_setup_cls,
.setup_taprio = tc_setup_taprio,
.setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index e162771893af..8dc2c3085dcf 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -585,6 +585,26 @@ static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
return am65_cpsw_set_taprio(ndev, type_data);
}
+static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
+{
+ struct tc_query_caps_base *base = type_data;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
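+ /* taprio offload is only available when TAS support is compiled in */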
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
struct netlink_ext_ack *extack,
struct flow_cls_offload *cls,
@@ -765,6 +785,8 @@ int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
+ case TC_QUERY_CAPS:
+ return am65_cpsw_tc_query_caps(ndev, type_data);
case TC_SETUP_QDISC_TAPRIO:
return am65_cpsw_setup_taprio(ndev, type_data);
case TC_SETUP_BLOCK:
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 758295c898ac..e966dd47e2db 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -20,6 +20,7 @@
#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include "cpsw.h"
#include "cpts.h"
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 0922beac3ec0..c9d88673d306 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN
config LIBWX
tristate
+ select PAGE_POOL
help
Common library for Wangxun(R) Ethernet drivers.
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 1ed5e23af944..850d1615cd18 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o
+libwx-objs := wx_hw.o wx_lib.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 3d7ba0c0df38..7db57f934a91 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -8,13 +8,14 @@
#include <linux/pci.h>
#include "wx_type.h"
+#include "wx_lib.h"
#include "wx_hw.h"
static void wx_intr_disable(struct wx *wx, u64 qmask)
{
u32 mask;
- mask = (qmask & 0xFFFFFFFF);
+ mask = (qmask & U32_MAX);
if (mask)
wr32(wx, WX_PX_IMS(0), mask);
@@ -25,6 +26,45 @@ static void wx_intr_disable(struct wx *wx, u64 qmask)
}
}
+void wx_intr_enable(struct wx *wx, u64 qmask)
+{
+ u32 mask;
+
+ mask = (qmask & U32_MAX);
+ if (mask)
+ wr32(wx, WX_PX_IMC(0), mask);
+ if (wx->mac.type == wx_mac_sp) {
+ mask = (qmask >> 32);
+ if (mask)
+ wr32(wx, WX_PX_IMC(1), mask);
+ }
+}
+EXPORT_SYMBOL(wx_intr_enable);
+
+/**
+ * wx_irq_disable - Mask off interrupt generation on the NIC
+ * @wx: board private structure
+ **/
+void wx_irq_disable(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ wr32(wx, WX_PX_MISC_IEN, 0);
+ wx_intr_disable(wx, WX_INTR_ALL);
+
+ if (pdev->msix_enabled) {
+ int vector;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++)
+ synchronize_irq(wx->msix_entries[vector].vector);
+
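+ /* also flush the misc interrupt vector that follows the queue vectors */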
+ synchronize_irq(wx->msix_entries[vector].vector);
+ } else {
+ synchronize_irq(pdev->irq);
+ }
+}
+EXPORT_SYMBOL(wx_irq_disable);
+
/* cmd_addr is used for some special command:
* 1. to be sector address, when implemented erase sector command
* 2. to be flash address when implemented read, write flash address
@@ -765,6 +805,37 @@ void wx_flush_sw_mac_table(struct wx *wx)
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);
+static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+{
+ u32 i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
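+ /* reuse an existing entry for this address if possible, otherwise claim the first free slot */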
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
+ if (ether_addr_equal(addr, wx->mac_table[i].addr)) {
+ if (wx->mac_table[i].pools != (1ULL << pool)) {
+ memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
+ wx->mac_table[i].pools |= (1ULL << pool);
+ wx_sync_mac_table(wx);
+ return i;
+ }
+ }
+ }
+
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE)
+ continue;
+ wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED |
+ WX_MAC_STATE_IN_USE);
+ memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
+ wx->mac_table[i].pools |= (1ULL << pool);
+ wx_sync_mac_table(wx);
+ return i;
+ }
+ return -ENOMEM;
+}
+
static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -789,6 +860,184 @@ static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
return -ENOMEM;
}
+static int wx_available_rars(struct wx *wx)
+{
+ u32 i, count = 0;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state == 0)
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * wx_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ * @pool: index for mac table
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int count = 0;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > wx_available_rars(wx))
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, netdev) {
+ wx_del_mac_filter(wx, ha->addr, pool);
+ wx_add_mac_filter(wx, ha->addr, pool);
+ count++;
+ }
+ }
+ return count;
+}
+
+/**
+ * wx_mta_vector - Determines bit-vector in multicast table to set
+ * @wx: pointer to private structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts 12 bits from a multicast address to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits
+ * of incoming rx multicast addresses to determine the bit-vector to
+ * check in the MTA. Which of the 4 combinations of 12 bits the hardware
+ * uses is set by the MO field of MCSTCTRL. The MO field is set during
+ * initialization to mc_filter_type.
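+ * For example, with mc_filter_type 0 and an address whose last two
+ * bytes are 0x4A 0x5B, the vector is (0x4A >> 4) | (0x5B << 4) = 0x5B4.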
+ **/
+static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (wx->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ wx_err(wx, "MC filter type param set incorrectly\n");
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * wx_set_mta - Set bit-vector in multicast table
+ * @wx: pointer to private structure
+ * @mc_addr: Multicast address
+ *
+ * Sets the bit-vector in the multicast table.
+ **/
+static void wx_set_mta(struct wx *wx, u8 *mc_addr)
+{
+ u32 vector, vector_bit, vector_reg;
+
+ wx->addr_ctrl.mta_in_use++;
+
+ vector = wx_mta_vector(wx, mc_addr);
+ wx_dbg(wx, " bit-vector = 0x%03X\n", vector);
+
+ /* The MTA is a register array of 128 32-bit registers. It is treated
+ * like an array of 4096 bits. We want to set bit
+ * BitArray[vector_value]. So we figure out what register the bit is
+ * in, read it, OR in the new bit, then write back the new value. The
+ * register is determined by the upper 7 bits of the vector value and
+ * the bit within that register are determined by the lower 5 bits of
+ * the value.
+ */
+ vector_reg = (vector >> 5) & 0x7F;
+ vector_bit = vector & 0x1F;
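+ /* e.g. vector 0x5B4 selects MTA register 0x2D (45), bit 20 */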
+ wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+/**
+ * wx_update_mc_addr_list - Updates MAC list of multicast addresses
+ * @wx: pointer to private structure
+ * @netdev: pointer to net device structure
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ **/
+static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
+{
+ struct netdev_hw_addr *ha;
+ u32 i, psrctl;
+
+ /* Set the new number of MC addresses that we are being requested to
+ * use.
+ */
+ wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
+ wx->addr_ctrl.mta_in_use = 0;
+
+ /* Clear mta_shadow */
+ wx_dbg(wx, " Clearing MTA\n");
+ memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow));
+
+ /* Update mta_shadow */
+ netdev_for_each_mc_addr(ha, netdev) {
+ wx_dbg(wx, " Adding the multicast addresses:\n");
+ wx_set_mta(wx, ha->addr);
+ }
+
+ /* Enable mta */
+ for (i = 0; i < wx->mac.mcft_size; i++)
+ wr32a(wx, WX_PSR_MC_TBL(0), i,
+ wx->mac.mta_shadow[i]);
+
+ if (wx->addr_ctrl.mta_in_use > 0) {
+ psrctl = rd32(wx, WX_PSR_CTL);
+ psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
+ psrctl |= WX_PSR_CTL_MFE |
+ (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT);
+ wr32(wx, WX_PSR_CTL, psrctl);
+ }
+
+ wx_dbg(wx, "Update mc addr list Complete\n");
+}
+
+/**
+ * wx_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int wx_write_mc_addr_list(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return 0;
+
+ wx_update_mc_addr_list(wx, netdev);
+
+ return netdev_mc_count(netdev);
+}
+
/**
* wx_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -844,6 +1093,430 @@ void wx_disable_rx(struct wx *wx)
}
EXPORT_SYMBOL(wx_disable_rx);
+static void wx_enable_rx(struct wx *wx)
+{
+ u32 psrctl;
+
+ /* enable mac receiver */
+ wr32m(wx, WX_MAC_RX_CFG,
+ WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
+
+ wr32m(wx, WX_RDB_PB_CTL,
+ WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN);
+
+ if (wx->mac.set_lben) {
+ psrctl = rd32(wx, WX_PSR_CTL);
+ psrctl |= WX_PSR_CTL_SW_EN;
+ wr32(wx, WX_PSR_CTL, psrctl);
+ wx->mac.set_lben = false;
+ }
+}
+
+/**
+ * wx_set_rxpba - Initialize Rx packet buffer
+ * @wx: pointer to private structure
+ **/
+static void wx_set_rxpba(struct wx *wx)
+{
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT;
+ wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);
+
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ txpktsize = wx->mac.tx_pb_size;
+ txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX;
+ wr32(wx, WX_TDB_PB_SZ(0), txpktsize);
+ wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
+}
+
+static void wx_configure_port(struct wx *wx)
+{
+ u32 value, i;
+
+ value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_D_VLAN |
+ WX_CFG_PORT_CTL_QINQ,
+ value);
+
+ wr32(wx, WX_CFG_TAG_TPID(0),
+ ETH_P_8021Q | ETH_P_8021AD << 16);
+ wx->tpid[0] = ETH_P_8021Q;
+ wx->tpid[1] = ETH_P_8021AD;
+ for (i = 1; i < 4; i++)
+ wr32(wx, WX_CFG_TAG_TPID(i),
+ ETH_P_8021Q | ETH_P_8021Q << 16);
+ for (i = 2; i < 8; i++)
+ wx->tpid[i] = ETH_P_8021Q;
+}
+
+/**
+ * wx_disable_sec_rx_path - Stops the receive data path
+ * @wx: pointer to private structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+static int wx_disable_sec_rx_path(struct wx *wx)
+{
+ u32 secrx;
+
+ wr32m(wx, WX_RSC_CTL,
+ WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS);
+
+ return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
+ 1000, 40000, false, wx, WX_RSC_ST);
+}
+
+/**
+ * wx_enable_sec_rx_path - Enables the receive data path
+ * @wx: pointer to private structure
+ *
+ * Enables the receive data path.
+ **/
+static void wx_enable_sec_rx_path(struct wx *wx)
+{
+ wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
+ WX_WRITE_FLUSH(wx);
+}
+
+void wx_set_rx_mode(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ u32 fctrl, vmolr, vlnctrl;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ fctrl = rd32(wx, WX_PSR_CTL);
+ fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
+ vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_MPE |
+ WX_PSR_VM_L2CTL_ROPE |
+ WX_PSR_VM_L2CTL_ROMPE);
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN);
+
+ /* set all bits that we expect to always be set */
+ fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE;
+ vmolr |= WX_PSR_VM_L2CTL_BAM |
+ WX_PSR_VM_L2CTL_AUPE |
+ WX_PSR_VM_L2CTL_VACC;
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+
+ wx->addr_ctrl.user_set_promisc = false;
+ if (netdev->flags & IFF_PROMISC) {
+ wx->addr_ctrl.user_set_promisc = true;
+ fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
+ /* the PF doesn't want packets routed to the VF, so leave UPE clear */
+ vmolr |= WX_PSR_VM_L2CTL_MPE;
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ }
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ fctrl |= WX_PSR_CTL_MPE;
+ vmolr |= WX_PSR_VM_L2CTL_MPE;
+ }
+
+ if (netdev->features & NETIF_F_RXALL) {
+ vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE);
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ /* receive bad packets */
+ wr32m(wx, WX_RSC_CTL,
+ WX_RSC_CTL_SAVE_MAC_ERR,
+ WX_RSC_CTL_SAVE_MAC_ERR);
+ } else {
+ vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE;
+ }
+
+ /* Write addresses to the available RAR registers; if there is not
+ * enough space to store all of them, enable unicast
+ * promiscuous mode.
+ */
+ count = wx_write_uc_addr_list(netdev, 0);
+ if (count < 0) {
+ vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
+ vmolr |= WX_PSR_VM_L2CTL_UPE;
+ }
+
+ /* Write addresses to the MTA; if the attempt fails,
+ * turn on multicast promiscuous mode so that we can
+ * at least receive multicast traffic.
+ */
+ count = wx_write_mc_addr_list(netdev);
+ if (count < 0) {
+ vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
+ vmolr |= WX_PSR_VM_L2CTL_MPE;
+ }
+
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ wr32(wx, WX_PSR_CTL, fctrl);
+ wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
+}
+EXPORT_SYMBOL(wx_set_rx_mode);
+
+static void wx_set_rx_buffer_len(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ u32 mhadd, max_frame;
+
+ max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
+ mhadd = rd32(wx, WX_PSR_MAX_SZ);
+ if (max_frame != mhadd)
+ wr32(wx, WX_PSR_MAX_SZ, max_frame);
+}
+
+/* Disable the specified rx queue */
+void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ u32 rxdctl;
+ int ret;
+
+ /* write value back with RRCFG.EN bit cleared */
+ wr32m(wx, WX_PX_RR_CFG(reg_idx),
+ WX_PX_RR_CFG_RR_EN, 0);
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN),
+ 10, 100, true, wx, WX_PX_RR_CFG(reg_idx));
+
+ if (ret == -ETIMEDOUT) {
+ /* Just for information */
+ wx_err(wx,
+ "RRCFG.EN on Rx queue %d not cleared within the polling period\n",
+ reg_idx);
+ }
+}
+EXPORT_SYMBOL(wx_disable_rx_queue);
+
+static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ u32 rxdctl;
+ int ret;
+
+ ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN,
+ 1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx));
+
+ if (ret == -ETIMEDOUT) {
+ /* Just for information */
+ wx_err(wx,
+ "RRCFG.EN on Rx queue %d not set within the polling period\n",
+ reg_idx);
+ }
+}
+
+static void wx_configure_srrctl(struct wx *wx,
+ struct wx_ring *rx_ring)
+{
+ u16 reg_idx = rx_ring->reg_idx;
+ u32 srrctl;
+
+ srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ |
+ WX_PX_RR_CFG_RR_BUF_SZ |
+ WX_PX_RR_CFG_SPLIT_MODE);
+ /* configure header buffer length, needed for RSC */
+ srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT;
+
+ /* configure the packet buffer length */
+ srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;
+
+ wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
+}
+
+static void wx_configure_tx_ring(struct wx *wx,
+ struct wx_ring *ring)
+{
+ u32 txdctl = WX_PX_TR_CFG_ENABLE;
+ u8 reg_idx = ring->reg_idx;
+ u64 tdba = ring->dma;
+ int ret;
+
+ /* disable queue to avoid issues while updating state */
+ wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
+ WX_WRITE_FLUSH(wx);
+
+ wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_PX_TR_RP(reg_idx), 0);
+ wr32(wx, WX_PX_TR_WP(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx);
+
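+ /* the TR_SIZE field is encoded in units of 128 descriptors */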
+ if (ring->count < WX_MAX_TXD)
+ txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
+ txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;
+
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct wx_tx_buffer) * ring->count);
+
+ /* enable queue */
+ wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);
+
+ /* poll to verify queue is enabled */
+ ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE,
+ 1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx));
+ if (ret == -ETIMEDOUT)
+ wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
+}
+
+static void wx_configure_rx_ring(struct wx *wx,
+ struct wx_ring *ring)
+{
+ u16 reg_idx = ring->reg_idx;
+ union wx_rx_desc *rx_desc;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ wx_disable_rx_queue(wx, ring);
+
+ wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));
+
+ if (ring->count == WX_MAX_RXD)
+ rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT;
+ else
+ rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT;
+
+ rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT;
+ wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_PX_RR_RP(reg_idx), 0);
+ wr32(wx, WX_PX_RR_WP(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);
+
+ wx_configure_srrctl(wx, ring);
+
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct wx_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = WX_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
+ /* enable receive descriptor ring */
+ wr32m(wx, WX_PX_RR_CFG(reg_idx),
+ WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN);
+
+ wx_enable_rx_queue(wx, ring);
+ wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
+}
+
+/**
+ * wx_configure_tx - Configure Transmit Unit after Reset
+ * @wx: pointer to private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void wx_configure_tx(struct wx *wx)
+{
+ u32 i;
+
+ /* TDM_CTL.TE must be before Tx queues are enabled */
+ wr32m(wx, WX_TDM_CTL,
+ WX_TDM_CTL_TE, WX_TDM_CTL_TE);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_configure_tx_ring(wx, wx->tx_ring[i]);
+
+ wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10);
+
+ if (wx->mac.type == wx_mac_em)
+ wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1);
+
+ /* enable mac transmitter */
+ wr32m(wx, WX_MAC_TX_CFG,
+ WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
+}
+
+/**
+ * wx_configure_rx - Configure Receive Unit after Reset
+ * @wx: pointer to private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void wx_configure_rx(struct wx *wx)
+{
+ u32 psrtype, i;
+ int ret;
+
+ wx_disable_rx(wx);
+
+ psrtype = WX_RDB_PL_CFG_L4HDR |
+ WX_RDB_PL_CFG_L3HDR |
+ WX_RDB_PL_CFG_L2HDR |
+ WX_RDB_PL_CFG_TUN_TUNHDR;
+ wr32(wx, WX_RDB_PL_CFG(0), psrtype);
+
+ /* enable hw crc stripping */
+ wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
+
+ if (wx->mac.type == wx_mac_sp) {
+ u32 psrctl;
+
+ /* RSC Setup */
+ psrctl = rd32(wx, WX_PSR_CTL);
+ psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
+ psrctl |= WX_PSR_CTL_RSC_DIS;
+ wr32(wx, WX_PSR_CTL, psrctl);
+ }
+
+ /* set_rx_buffer_len must be called before ring initialization */
+ wx_set_rx_buffer_len(wx);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_configure_rx_ring(wx, wx->rx_ring[i]);
+
+ /* Enable all receives; disable the security engine first so it does not block traffic */
+ ret = wx_disable_sec_rx_path(wx);
+ if (ret < 0)
+ wx_err(wx, "The register status is abnormal, please check device.");
+
+ wx_enable_rx(wx);
+ wx_enable_sec_rx_path(wx);
+}
+
+static void wx_configure_isb(struct wx *wx)
+{
+ /* set ISB Address */
+ wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32));
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma));
+}
+
+void wx_configure(struct wx *wx)
+{
+ wx_set_rxpba(wx);
+ wx_configure_port(wx);
+
+ wx_set_rx_mode(wx->netdev);
+
+ wx_enable_sec_rx_path(wx);
+
+ wx_configure_tx(wx);
+ wx_configure_rx(wx);
+ wx_configure_isb(wx);
+}
+EXPORT_SYMBOL(wx_configure);
+
/**
* wx_disable_pcie_master - Disable PCI-express master access
* @wx: pointer to hardware structure
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 803983546f3a..44dfd6ea442a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -4,6 +4,8 @@
#ifndef _WX_HW_H_
#define _WX_HW_H_
+void wx_intr_enable(struct wx *wx, u64 qmask);
+void wx_irq_disable(struct wx *wx);
int wx_check_flash_load(struct wx *wx, u32 check_bit);
void wx_control_hw(struct wx *wx, bool drv);
int wx_mng_present(struct wx *wx);
@@ -20,6 +22,9 @@ void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
void wx_flush_sw_mac_table(struct wx *wx);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
+void wx_set_rx_mode(struct net_device *netdev);
+void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
+void wx_configure(struct wx *wx);
int wx_disable_pcie_master(struct wx *wx);
int wx_stop_adapter(struct wx *wx);
void wx_reset_misc(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
new file mode 100644
index 000000000000..2ee286b2b177
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -0,0 +1,2004 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <net/page_pool.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_lib.h"
+#include "wx_hw.h"
+
+/* wx_test_staterr - tests bits in Rx descriptor status and error fields */
+static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
+static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
+
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+ return false;
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
+/**
+ * wx_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void wx_reuse_rx_page(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *old_buff)
+{
+ u16 nta = rx_ring->next_to_alloc;
+ struct wx_rx_buffer *new_buff;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ new_buff->page = old_buff->page;
+ new_buff->page_dma = old_buff->page_dma;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+static void wx_dma_sync_frag(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *rx_buffer)
+{
+ struct sk_buff *skb = rx_buffer->skb;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ WX_CB(skb)->dma,
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ DMA_FROM_DEVICE);
+
+ /* If the page was released, just unmap it. */
+ if (unlikely(WX_CB(skb)->page_released))
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+}
+
+static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
+ union wx_rx_desc *rx_desc,
+ struct sk_buff **skb,
+ int *rx_buffer_pgcnt)
+{
+ struct wx_rx_buffer *rx_buffer;
+ unsigned int size;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+
+#if (PAGE_SIZE < 8192)
+ *rx_buffer_pgcnt = page_count(rx_buffer->page);
+#else
+ *rx_buffer_pgcnt = 0;
+#endif
+
+ prefetchw(rx_buffer->page);
+ *skb = rx_buffer->skb;
+
+ /* Delay unmapping of the first packet. It carries the header
+ * information, HW may still access the header after the writeback.
+ * Only unmap it when EOP is reached
+ */
+ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) {
+ if (!*skb)
+ goto skip_sync;
+ } else {
+ if (*skb)
+ wx_dma_sync_frag(rx_ring, rx_buffer);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+skip_sync:
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+static void wx_put_rx_buffer(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *rx_buffer,
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
+{
+ if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
+ /* hand second half of page back to the ring */
+ wx_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
+ /* the page has been released from the ring */
+ WX_CB(skb)->page_released = true;
+ else
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+ rx_buffer->skb = NULL;
+}
+
+static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *rx_buffer,
+ union wx_rx_desc *rx_desc)
+{
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = WX_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
+ struct sk_buff *skb = rx_buffer->skb;
+
+ if (!skb) {
+ void *page_addr = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+
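+ /* small frames are copied entirely into the skb linear area
+ * below; the page itself stays untouched, so return the
+ * pagecnt_bias reference taken in wx_get_rx_buffer()
+ */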
+ if (size <= WX_RXBUFFER_256) {
+ memcpy(__skb_put(skb, size), page_addr,
+ ALIGN(size, sizeof(long)));
+ rx_buffer->pagecnt_bias++;
+
+ return skb;
+ }
+
+ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
+ WX_CB(skb)->dma = rx_buffer->dma;
+
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset,
+ size, truesize);
+ goto out;
+
+ } else {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+ }
+
+out:
+#if (PAGE_SIZE < 8192)
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+#endif
+
+ return skb;
+}
+
+static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ page = page_pool_dev_alloc_pages(rx_ring->page_pool);
+ if (WARN_ON(!page))
+ return false;
+ dma = page_pool_get_dma_addr(page);
+
+ bi->page_dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
+
+ return true;
+}
+
+/**
+ * wx_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union wx_rx_desc *rx_desc;
+ struct wx_rx_buffer *bi;
+
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
+ rx_desc = WX_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
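+ /* bias i by -count so the wrap check below reduces to !i */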
+
+ do {
+ if (!wx_alloc_mapped_page(rx_ring, bi))
+ break;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ WX_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ rx_desc->read.pkt_addr =
+ cpu_to_le64(bi->page_dma + bi->page_offset);
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = WX_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.upper.status_error = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+}
+
+u16 wx_desc_unused(struct wx_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
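+ /* one slot is always left empty so that next_to_use ==
+ * next_to_clean means empty rather than full, e.g. with
+ * count = 512 and ntc == ntu this yields 511 unused
+ */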
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+/**
+ * wx_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool wx_is_non_eop(struct wx_ring *rx_ring,
+ union wx_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(WX_RX_DESC(rx_ring, ntc));
+
+ /* if we are the last buffer then there is nothing else to do */
+ if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)))
+ return false;
+
+ rx_ring->rx_buffer_info[ntc].skb = skb;
+
+ return true;
+}
+
+static void wx_pull_tail(struct sk_buff *skb)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int pull_len;
+ unsigned char *va;
+
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool by the
+ * page pool
+ */
+ va = skb_frag_address(frag);
+
+ /* the linear header must hold at least ETH_HLEN bytes, or up to
+ * 60 bytes when skb->len is below 60 so eth_skb_pad() can pad it
+ */
+ pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ skb_frag_off_add(frag, pull_len);
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * wx_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right. These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool wx_cleanup_headers(struct wx_ring *rx_ring,
+ union wx_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rx_ring->netdev;
+
+ /* verify that the packet does not have any known errors */
+ if (!netdev ||
+ unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) &&
+ !(netdev->features & NETIF_F_RXALL))) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+
+ /* place header in linear portion of buffer */
+ if (!skb_headlen(skb))
+ wx_pull_tail(skb);
+
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
+
+ return false;
+}
+
+/**
+ * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
+ struct wx_ring *rx_ring,
+ int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = wx_desc_unused(rx_ring);
+
+ do {
+ struct wx_rx_buffer *rx_buffer;
+ union wx_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ int rx_buffer_pgcnt;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= WX_RX_BUFFER_WRITE) {
+ wx_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt);
+
+ /* retrieve a buffer from the ring */
+ skb = wx_build_skb(rx_ring, rx_buffer, rx_desc);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+ wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
+ cleaned_count++;
+
+ /* place incomplete frames back on ring for completion */
+ if (wx_is_non_eop(rx_ring, rx_desc, skb))
+ continue;
+
+ /* verify the packet layout is correct */
+ if (wx_cleanup_headers(rx_ring, rx_desc, skb))
+ continue;
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* update budget accounting */
+ total_rx_packets++;
+ } while (likely(total_rx_packets < budget));
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
+
+ return total_rx_packets;
+}
+
+static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+
+/**
+ * wx_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
+ **/
+static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
+ struct wx_ring *tx_ring, int napi_budget)
+{
+ unsigned int budget = q_vector->wx->tx_work_limit;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int i = tx_ring->next_to_clean;
+ struct wx_tx_buffer *tx_buffer;
+ union wx_tx_desc *tx_desc;
+
+ if (!netif_carrier_ok(tx_ring->netdev))
+ return true;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = WX_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
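+ /* as in wx_alloc_rx_buffers(), i is biased by -count so the
+ * ring wrap check is a simple !i test
+ */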
+
+ do {
+ union wx_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
+ /* free the skb */
+ napi_consume_skb(tx_buffer->skb, napi_budget);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ netdev_tx_completed_queue(wx_txring_txq(tx_ring),
+ total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ netif_running(tx_ring->netdev))
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ }
+
+ return !!budget;
+}
+
+/**
+ * wx_poll - NAPI polling RX/TX cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ **/
+static int wx_poll(struct napi_struct *napi, int budget)
+{
+ struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi);
+ int per_ring_budget, work_done = 0;
+ struct wx *wx = q_vector->wx;
+ bool clean_complete = true;
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->tx) {
+ if (!wx_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
+
+ /* Exit if we are called by netpoll */
+ if (budget <= 0)
+ return budget;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling
+ */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget / q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
+
+ wx_for_each_ring(ring, q_vector->rx) {
+ int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget);
+
+ work_done += cleaned;
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
+ }
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ if (likely(napi_complete_done(napi, work_done))) {
+ if (netif_running(wx->netdev))
+ wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
+ }
+
+ return min(work_done, budget - 1);
+}
+
+static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size)
+{
+ if (likely(wx_desc_unused(tx_ring) >= size))
+ return 0;
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ /* For the next check */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available.
+ */
+ if (likely(wx_desc_unused(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ return 0;
+}
+
+static void wx_tx_map(struct wx_ring *tx_ring,
+ struct wx_tx_buffer *first)
+{
+ struct sk_buff *skb = first->skb;
+ struct wx_tx_buffer *tx_buffer;
+ u16 i = tx_ring->next_to_use;
+ unsigned int data_len, size;
+ union wx_tx_desc *tx_desc;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u32 cmd_type;
+
+ cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS;
+ tx_desc = WX_TX_DESC(tx_ring, i);
+
+ tx_desc->read.olinfo_status = cpu_to_le32(skb->len << WX_TXD_PAYLEN_SHIFT);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
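+ /* split buffers larger than WX_MAX_DATA_PER_TXD across several
+ * descriptors; the length bits of cmd_type are clear here, so
+ * XOR-ing the length in builds the final command word
+ */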
+ while (unlikely(size > WX_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type ^ WX_MAX_DATA_PER_TXD);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ dma += WX_MAX_DATA_PER_TXD;
+ size -= WX_MAX_DATA_PER_TXD;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ size = skb_frag_size(frag);
+
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ }
+
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= size | WX_TXD_EOP | WX_TXD_RS;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
+
+ skb_tx_timestamp(skb);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ wx_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
+ writel(i, tx_ring->tail);
+
+ return;
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
+ i += tx_ring->count;
+ i--;
+ }
+
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+
+ tx_ring->next_to_use = i;
+}
+
+static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
+ struct wx_ring *tx_ring)
+{
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ struct wx_tx_buffer *first;
+ unsigned short f;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
+ frags[f]));
+
+ if (wx_maybe_stop_tx(tx_ring, count + 3))
+ return NETDEV_TX_BUSY;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
+ wx_tx_map(tx_ring, first);
+
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ unsigned int r_idx = skb->queue_mapping;
+ struct wx *wx = netdev_priv(netdev);
+ struct wx_ring *tx_ring;
+
+ if (!netif_carrier_ok(netdev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* The minimum packet size for olinfo paylen is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
+
+ if (r_idx >= wx->num_tx_queues)
+ r_idx = r_idx % wx->num_tx_queues;
+ tx_ring = wx->tx_ring[r_idx];
+
+ return wx_xmit_frame_ring(skb, tx_ring);
+}
+EXPORT_SYMBOL(wx_xmit_frame);
+
+void wx_napi_enable_all(struct wx *wx)
+{
+ struct wx_q_vector *q_vector;
+ int q_idx;
+
+ for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
+ q_vector = wx->q_vector[q_idx];
+ napi_enable(&q_vector->napi);
+ }
+}
+EXPORT_SYMBOL(wx_napi_enable_all);
+
+void wx_napi_disable_all(struct wx *wx)
+{
+ struct wx_q_vector *q_vector;
+ int q_idx;
+
+ for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
+ q_vector = wx->q_vector[q_idx];
+ napi_disable(&q_vector->napi);
+ }
+}
+EXPORT_SYMBOL(wx_napi_disable_all);
+
+/**
+ * wx_set_rss_queues: Allocate queues for RSS
+ * @wx: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) spreads
+ * receive traffic across queues; start from the hardware maximum, the
+ * final count is capped later by the number of MSI-X vectors acquired.
+ *
+ **/
+static void wx_set_rss_queues(struct wx *wx)
+{
+ wx->num_rx_queues = wx->mac.max_rx_queues;
+ wx->num_tx_queues = wx->mac.max_tx_queues;
+}
+
+static void wx_set_num_queues(struct wx *wx)
+{
+ /* Start with base case */
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+ wx->queues_per_pool = 1;
+
+ wx_set_rss_queues(wx);
+}
+
+/**
+ * wx_acquire_msix_vectors - acquire MSI-X vectors
+ * @wx: board private structure
+ *
+ * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
+ * return a negative error code if unable to acquire MSI-X vectors for any
+ * reason.
+ */
+static int wx_acquire_msix_vectors(struct wx *wx)
+{
+ struct irq_affinity affd = {0, };
+ int nvecs, i;
+
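+ /* request one vector per online CPU, capped by the hardware
+ * maximum; one of them is reserved below for the misc
+ * (non-queue) interrupt
+ */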
+ nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);
+
+ wx->msix_entries = kcalloc(nvecs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entries)
+ return -ENOMEM;
+
+ nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
+ nvecs,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ &affd);
+ if (nvecs < 0) {
+ wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
+ kfree(wx->msix_entries);
+ wx->msix_entries = NULL;
+ return nvecs;
+ }
+
+ for (i = 0; i < nvecs; i++) {
+ wx->msix_entries[i].entry = i;
+ wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i);
+ }
+
+ /* one for msix_other */
+ nvecs -= 1;
+ wx->num_q_vectors = nvecs;
+ wx->num_rx_queues = nvecs;
+ wx->num_tx_queues = nvecs;
+
+ return 0;
+}
+
+/**
+ * wx_set_interrupt_capability - set MSI-X or MSI if supported
+ * @wx: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int wx_set_interrupt_capability(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ int nvecs, ret;
+
+ /* We will try to get MSI-X interrupts first */
+ ret = wx_acquire_msix_vectors(wx);
+ if (ret == 0 || ret == -ENOMEM)
+ return ret;
+
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+ wx->num_q_vectors = 1;
+
+ /* minimum of one vector, shared by the queue and misc interrupts */
+ nvecs = 1;
+ nvecs = pci_alloc_irq_vectors(pdev, nvecs,
+ nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (nvecs == 1) {
+ if (pdev->msi_enabled)
+ wx_err(wx, "Fallback to MSI.\n");
+ else
+ wx_err(wx, "Fallback to LEGACY.\n");
+ } else {
+ wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs);
+ return nvecs;
+ }
+
+ pdev->irq = pci_irq_vector(pdev, 0);
+
+ return 0;
+}
+
+/**
+ * wx_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @wx: board private structure to initialize
+ *
+ * Cache the descriptor ring to register (reg_idx) mapping for RSS.
+ *
+ **/
+static void wx_cache_ring_rss(struct wx *wx)
+{
+ u16 i;
+
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx->rx_ring[i]->reg_idx = i;
+
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx->tx_ring[i]->reg_idx = i;
+}
+
+static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head)
+{
+ ring->next = head->ring;
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * wx_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @wx: board private structure to initialize
+ * @v_count: q_vectors allocated on wx, used for ring interleaving
+ * @v_idx: index of vector in wx struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int wx_alloc_q_vector(struct wx *wx,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
+{
+ struct wx_q_vector *q_vector;
+ int ring_count, default_itr;
+ struct wx_ring *ring;
+
+ /* note this will allocate space for the ring structure as well! */
+ ring_count = txr_count + rxr_count;
+
+ q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
+ GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(wx->netdev, &q_vector->napi,
+ wx_poll);
+
+ /* tie q_vector and wx together */
+ wx->q_vector[v_idx] = q_vector;
+ q_vector->wx = wx;
+ q_vector->v_idx = v_idx;
+ if (cpu_online(v_idx))
+ q_vector->numa_node = cpu_to_node(v_idx);
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ if (wx->mac.type == wx_mac_sp)
+ default_itr = WX_12K_ITR;
+ else
+ default_itr = WX_7K_ITR;
+ /* initialize ITR: use the user setting when one is configured,
+ * otherwise fall back to the hardware default
+ */
+ if (txr_count && !rxr_count)
+ /* tx only vector */
+ q_vector->itr = wx->tx_itr_setting ?
+ wx->tx_itr_setting : default_itr;
+ else
+ /* rx or rx/tx vector */
+ q_vector->itr = wx->rx_itr_setting ?
+ wx->rx_itr_setting : default_itr;
+
+ while (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &wx->pdev->dev;
+ ring->netdev = wx->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ wx_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = wx->tx_ring_count;
+
+ ring->queue_index = txr_idx;
+
+ /* assign ring to wx */
+ wx->tx_ring[txr_idx] = ring;
+
+ /* update count and index */
+ txr_count--;
+ txr_idx += v_count;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ while (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &wx->pdev->dev;
+ ring->netdev = wx->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ wx_add_ring(ring, &q_vector->rx);
+
+ /* apply Rx specific ring traits */
+ ring->count = wx->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to wx */
+ wx->rx_ring[rxr_idx] = ring;
+
+ /* update count and index */
+ rxr_count--;
+ rxr_idx += v_count;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ return 0;
+}
+
+/**
+ * wx_free_q_vector - Free memory allocated for specific interrupt vector
+ * @wx: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void wx_free_q_vector(struct wx *wx, int v_idx)
+{
+ struct wx_q_vector *q_vector = wx->q_vector[v_idx];
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->tx)
+ wx->tx_ring[ring->queue_index] = NULL;
+
+ wx_for_each_ring(ring, q_vector->rx)
+ wx->rx_ring[ring->queue_index] = NULL;
+
+ wx->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * wx_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @wx: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int wx_alloc_q_vectors(struct wx *wx)
+{
+ unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ unsigned int rxr_remaining = wx->num_rx_queues;
+ unsigned int txr_remaining = wx->num_tx_queues;
+ unsigned int q_vectors = wx->num_q_vectors;
+ int rqpv, tqpv;
+ int err;
+
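+ /* spread the remaining rings as evenly as possible across the
+ * remaining vectors
+ */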
+ for (; v_idx < q_vectors; v_idx++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ err = wx_alloc_q_vector(wx, q_vectors, v_idx,
+ tqpv, txr_idx,
+ rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
+ return 0;
+
+err_out:
+ wx->num_tx_queues = 0;
+ wx->num_rx_queues = 0;
+ wx->num_q_vectors = 0;
+
+ while (v_idx--)
+ wx_free_q_vector(wx, v_idx);
+
+ return -ENOMEM;
+}
+
+/**
+ * wx_free_q_vectors - Free memory allocated for interrupt vectors
+ * @wx: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void wx_free_q_vectors(struct wx *wx)
+{
+ int v_idx = wx->num_q_vectors;
+
+ wx->num_tx_queues = 0;
+ wx->num_rx_queues = 0;
+ wx->num_q_vectors = 0;
+
+ while (v_idx--)
+ wx_free_q_vector(wx, v_idx);
+}
+
+void wx_reset_interrupt_capability(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ return;
+
+ pci_free_irq_vectors(wx->pdev);
+ if (pdev->msix_enabled) {
+ kfree(wx->msix_entries);
+ wx->msix_entries = NULL;
+ }
+}
+EXPORT_SYMBOL(wx_reset_interrupt_capability);
+
+/**
+ * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @wx: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void wx_clear_interrupt_scheme(struct wx *wx)
+{
+ wx_free_q_vectors(wx);
+ wx_reset_interrupt_capability(wx);
+}
+EXPORT_SYMBOL(wx_clear_interrupt_scheme);
+
+int wx_init_interrupt_scheme(struct wx *wx)
+{
+ int ret;
+
+ /* Number of supported queues */
+ wx_set_num_queues(wx);
+
+ /* Set interrupt mode */
+ ret = wx_set_interrupt_capability(wx);
+ if (ret) {
+ wx_err(wx, "Allocate irq vectors for failed.\n");
+ return ret;
+ }
+
+ /* Allocate memory for queues */
+ ret = wx_alloc_q_vectors(wx);
+ if (ret) {
+ wx_err(wx, "Unable to allocate memory for queue vectors.\n");
+ wx_reset_interrupt_capability(wx);
+ return ret;
+ }
+
+ wx_cache_ring_rss(wx);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_init_interrupt_scheme);
+
+irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector = data;
+
+ /* EIAM disabled interrupts (on this vector) for us */
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule_irqoff(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(wx_msix_clean_rings);
+
+void wx_free_irq(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ int vector;
+
+ if (!(pdev->msix_enabled)) {
+ free_irq(pdev->irq, wx);
+ return;
+ }
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_entries[vector];
+
+ /* free only the irqs that were actually requested */
+ if (!q_vector->rx.ring && !q_vector->tx.ring)
+ continue;
+
+ free_irq(entry->vector, q_vector);
+ }
+
+ free_irq(wx->msix_entries[vector].vector, wx);
+}
+EXPORT_SYMBOL(wx_free_irq);
+
+/**
+ * wx_setup_isb_resources - allocate interrupt status resources
+ * @wx: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+int wx_setup_isb_resources(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
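+ /* the ISB is four 32-bit status words (see enum wx_isb_idx)
+ * which the device updates via DMA
+ */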
+ wx->isb_mem = dma_alloc_coherent(&pdev->dev,
+ sizeof(u32) * 4,
+ &wx->isb_dma,
+ GFP_KERNEL);
+ if (!wx->isb_mem) {
+ wx_err(wx, "Alloc isb_mem failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_setup_isb_resources);
+
+/**
+ * wx_free_isb_resources - free interrupt status block resources
+ * @wx: board private structure
+ **/
+void wx_free_isb_resources(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ dma_free_coherent(&pdev->dev, sizeof(u32) * 4,
+ wx->isb_mem, wx->isb_dma);
+ wx->isb_mem = NULL;
+}
+EXPORT_SYMBOL(wx_free_isb_resources);
+
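+/* cache the current ISB header tag for this entry, then return the
+ * requested interrupt status word
+ */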
+u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx)
+{
+ u32 cur_tag;
+
+ cur_tag = wx->isb_mem[WX_ISB_HEADER];
+ wx->isb_tag[idx] = cur_tag;
+
+ return (__force u32)cpu_to_le32(wx->isb_mem[idx]);
+}
+EXPORT_SYMBOL(wx_misc_isb);
+
+/**
+ * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @wx: pointer to wx struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ **/
+static void wx_set_ivar(struct wx *wx, s8 direction,
+ u16 queue, u16 msix_vector)
+{
+ u32 ivar, index;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
+ index = 0;
+ ivar = rd32(wx, WX_PX_MISC_IVAR);
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ wr32(wx, WX_PX_MISC_IVAR, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
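+ /* each IVAR register covers two queues, one 8-bit entry per
+ * queue per direction: (queue & 1) selects the half and
+ * direction selects Rx (0) or Tx (1) within it
+ */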
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ wr32(wx, WX_PX_IVAR(queue >> 1), ivar);
+ }
+}
+
+/**
+ * wx_write_eitr - write EITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update EITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void wx_write_eitr(struct wx_q_vector *q_vector)
+{
+ struct wx *wx = q_vector->wx;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg;
+
+ if (wx->mac.type == wx_mac_sp)
+ itr_reg = q_vector->itr & WX_SP_MAX_EITR;
+ else
+ itr_reg = q_vector->itr & WX_EM_MAX_EITR;
+
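+ /* CNT_WDIS keeps this write from also resetting the running
+ * interval counter
+ */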
+ itr_reg |= WX_PX_ITR_CNT_WDIS;
+
+ wr32(wx, WX_PX_ITR(v_idx), itr_reg);
+}
+
+/**
+ * wx_configure_vectors - Configure vectors for hardware
+ * @wx: board private structure
+ *
+ * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY
+ * interrupts.
+ **/
+void wx_configure_vectors(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ u32 eitrsel = 0;
+ u16 v_idx;
+
+ if (pdev->msix_enabled) {
+ /* Populate MSIX to EITR Select */
+ wr32(wx, WX_PX_ITRSEL, eitrsel);
+ /* use GPIE MODEL mode to auto-mask when an MSI-X interrupt is
+ * asserted, this saves a register write for every interrupt
+ */
+ wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL);
+ } else {
+ /* MSI and legacy interrupts share a single vector, so leave
+ * GPIE in its default (non-MODEL) mode
+ */
+ wr32(wx, WX_PX_GPIE, 0);
+ }
+
+ /* Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
+ struct wx_q_vector *q_vector = wx->q_vector[v_idx];
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->rx)
+ wx_set_ivar(wx, 0, ring->reg_idx, v_idx);
+
+ wx_for_each_ring(ring, q_vector->tx)
+ wx_set_ivar(wx, 1, ring->reg_idx, v_idx);
+
+ wx_write_eitr(q_vector);
+ }
+
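+ /* after the loop v_idx == num_q_vectors: map the misc causes to
+ * that extra vector and give it a fixed interrupt rate
+ */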
+ wx_set_ivar(wx, -1, 0, v_idx);
+ if (pdev->msix_enabled)
+ wr32(wx, WX_PX_ITR(v_idx), 1950);
+}
+EXPORT_SYMBOL(wx_configure_vectors);
+
+/**
+ * wx_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void wx_clean_rx_ring(struct wx_ring *rx_ring)
+{
+ struct wx_rx_buffer *rx_buffer;
+ u16 i = rx_ring->next_to_clean;
+
+ rx_buffer = &rx_ring->rx_buffer_info[i];
+
+ /* Free all the Rx ring sk_buffs */
+ while (i != rx_ring->next_to_alloc) {
+ if (rx_buffer->skb) {
+ struct sk_buff *skb = rx_buffer->skb;
+
+ if (WX_CB(skb)->page_released)
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+
+ dev_kfree_skb(skb);
+ }
+
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ WX_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+
+ i++;
+ rx_buffer++;
+ if (i == rx_ring->count) {
+ i = 0;
+ rx_buffer = rx_ring->rx_buffer_info;
+ }
+ }
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * wx_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @wx: board private structure
+ **/
+void wx_clean_all_rx_rings(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_clean_rx_ring(wx->rx_ring[i]);
+}
+EXPORT_SYMBOL(wx_clean_all_rx_rings);
+
+/**
+ * wx_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+static void wx_free_rx_resources(struct wx_ring *rx_ring)
+{
+ wx_clean_rx_ring(rx_ring);
+ kvfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+
+ if (rx_ring->page_pool) {
+ page_pool_destroy(rx_ring->page_pool);
+ rx_ring->page_pool = NULL;
+ }
+}
+
+/**
+ * wx_free_all_rx_resources - Free Rx Resources for All Queues
+ * @wx: pointer to hardware structure
+ *
+ * Free all receive software resources
+ **/
+static void wx_free_all_rx_resources(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_free_rx_resources(wx->rx_ring[i]);
+}
+
+/**
+ * wx_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void wx_clean_tx_ring(struct wx_ring *tx_ring)
+{
+ struct wx_tx_buffer *tx_buffer;
+ u16 i = tx_ring->next_to_clean;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+
+ while (i != tx_ring->next_to_use) {
+ union wx_tx_desc *eop_desc, *tx_desc;
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = WX_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ netdev_tx_reset_queue(wx_txring_txq(tx_ring));
+
+ /* reset next_to_use and next_to_clean */
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * wx_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @wx: board private structure
+ **/
+void wx_clean_all_tx_rings(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_clean_tx_ring(wx->tx_ring[i]);
+}
+EXPORT_SYMBOL(wx_clean_all_tx_rings);
+
+/**
+ * wx_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+static void wx_free_tx_resources(struct wx_ring *tx_ring)
+{
+ wx_clean_tx_ring(tx_ring);
+ kvfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * wx_free_all_tx_resources - Free Tx Resources for All Queues
+ * @wx: pointer to hardware structure
+ *
+ * Free all transmit software resources
+ **/
+static void wx_free_all_tx_resources(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_free_tx_resources(wx->tx_ring[i]);
+}
+
+void wx_free_resources(struct wx *wx)
+{
+ wx_free_isb_resources(wx);
+ wx_free_all_rx_resources(wx);
+ wx_free_all_tx_resources(wx);
+}
+EXPORT_SYMBOL(wx_free_resources);
+
+static int wx_alloc_page_pool(struct wx_ring *rx_ring)
+{
+ int ret = 0;
+
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = rx_ring->size,
+ .nid = dev_to_node(rx_ring->dev),
+ .dev = rx_ring->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = PAGE_SIZE,
+ };
+
+ rx_ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx_ring->page_pool)) {
+ rx_ring->page_pool = NULL;
+ ret = PTR_ERR(rx_ring->page_pool);
+ }
+
+ return ret;
+}
+
+/**
+ * wx_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int wx_setup_rx_resources(struct wx_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int numa_node = NUMA_NO_NODE;
+ int size, ret;
+
+ size = sizeof(struct wx_rx_buffer) * rx_ring->count;
+
+ if (rx_ring->q_vector)
+ numa_node = rx_ring->q_vector->numa_node;
+
+ rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ set_dev_node(dev, numa_node);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!rx_ring->desc) {
+ set_dev_node(dev, orig_node);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ }
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ ret = wx_alloc_page_pool(rx_ring);
+ if (ret < 0) {
+ dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ kvfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * wx_setup_all_rx_resources - allocate all queues Rx resources
+ * @wx: pointer to hardware structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_all_rx_resources(struct wx *wx)
+{
+ int i, err = 0;
+
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ err = wx_setup_rx_resources(wx->rx_ring[i]);
+ if (!err)
+ continue;
+
+ wx_err(wx, "Allocation for Rx Queue %u failed\n", i);
+ goto err_setup_rx;
+ }
+
+ return 0;
+err_setup_rx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ wx_free_rx_resources(wx->rx_ring[i]);
+ return err;
+}
+
+/**
+ * wx_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_tx_resources(struct wx_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int numa_node = NUMA_NO_NODE;
+ int size;
+
+ size = sizeof(struct wx_tx_buffer) * tx_ring->count;
+
+ if (tx_ring->q_vector)
+ numa_node = tx_ring->q_vector->numa_node;
+
+ tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ set_dev_node(dev, numa_node);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ set_dev_node(dev, orig_node);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ }
+
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+
+err:
+ kvfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * wx_setup_all_tx_resources - allocate all queues Tx resources
+ * @wx: pointer to private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_all_tx_resources(struct wx *wx)
+{
+ int i, err = 0;
+
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ err = wx_setup_tx_resources(wx->tx_ring[i]);
+ if (!err)
+ continue;
+
+ wx_err(wx, "Allocation for Tx Queue %u failed\n", i);
+ goto err_setup_tx;
+ }
+
+ return 0;
+err_setup_tx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ wx_free_tx_resources(wx->tx_ring[i]);
+ return err;
+}
+
+int wx_setup_resources(struct wx *wx)
+{
+ int err;
+
+ /* allocate transmit descriptors */
+ err = wx_setup_all_tx_resources(wx);
+ if (err)
+ return err;
+
+ /* allocate receive descriptors */
+ err = wx_setup_all_rx_resources(wx);
+ if (err)
+ goto err_free_tx;
+
+ err = wx_setup_isb_resources(wx);
+ if (err)
+ goto err_free_rx;
+
+ return 0;
+
+err_free_rx:
+ wx_free_all_rx_resources(wx);
+err_free_tx:
+ wx_free_all_tx_resources(wx);
+
+ return err;
+}
+EXPORT_SYMBOL(wx_setup_resources);
+
+/**
+ * wx_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
+ * @stats: storage space for 64bit statistics
+ */
+void wx_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ }
+
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry(&ring->syncp,
+ start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+ }
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(wx_get_stats64);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
new file mode 100644
index 000000000000..50ee41f1fa10
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * WangXun Gigabit PCI Express Linux driver
+ * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
+ */
+
+#ifndef _WX_LIB_H_
+#define _WX_LIB_H_
+
+void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count);
+u16 wx_desc_unused(struct wx_ring *ring);
+netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev);
+void wx_napi_enable_all(struct wx *wx);
+void wx_napi_disable_all(struct wx *wx);
+void wx_reset_interrupt_capability(struct wx *wx);
+void wx_clear_interrupt_scheme(struct wx *wx);
+int wx_init_interrupt_scheme(struct wx *wx);
+irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data);
+void wx_free_irq(struct wx *wx);
+int wx_setup_isb_resources(struct wx *wx);
+void wx_free_isb_resources(struct wx *wx);
+u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx);
+void wx_configure_vectors(struct wx *wx);
+void wx_clean_all_rx_rings(struct wx *wx);
+void wx_clean_all_tx_rings(struct wx *wx);
+void wx_free_resources(struct wx *wx);
+int wx_setup_resources(struct wx *wx);
+void wx_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+
+#endif /* _WX_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index c86a37914d43..eede93d4120d 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -5,6 +5,7 @@
#define _WX_TYPE_H_
#include <linux/bitfield.h>
+#include <linux/netdevice.h>
/* Vendor ID */
#ifndef PCI_VENDOR_ID_WANGXUN
@@ -65,21 +66,50 @@
/* port cfg Registers */
#define WX_CFG_PORT_CTL 0x14400
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
+#define WX_CFG_PORT_CTL_QINQ BIT(2)
+#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan */
+#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
+
+/* GPIO Registers */
+#define WX_GPIO_DR 0x14800
+#define WX_GPIO_DR_0 BIT(0) /* SDP0 Data Value */
+#define WX_GPIO_DR_1 BIT(1) /* SDP1 Data Value */
+#define WX_GPIO_DDR 0x14804
+#define WX_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */
+#define WX_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */
+#define WX_GPIO_CTL 0x14808
+#define WX_GPIO_INTEN 0x14830
+#define WX_GPIO_INTEN_0 BIT(0)
+#define WX_GPIO_INTEN_1 BIT(1)
+#define WX_GPIO_INTMASK 0x14834
+#define WX_GPIO_INTTYPE_LEVEL 0x14838
+#define WX_GPIO_POLARITY 0x1483C
+#define WX_GPIO_EOI 0x1484C
/*********************** Transmit DMA registers **************************/
/* transmit global control */
#define WX_TDM_CTL 0x18000
/* TDM CTL BIT */
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
+#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
/***************************** RDB registers *********************************/
/* receive packet buffer */
#define WX_RDB_PB_CTL 0x19000
#define WX_RDB_PB_CTL_RXEN BIT(31) /* Enable Receiver */
#define WX_RDB_PB_CTL_DISABLED BIT(0)
+#define WX_RDB_PB_SZ(_i) (0x19020 + ((_i) * 4))
+#define WX_RDB_PB_SZ_SHIFT 10
/* statistic */
#define WX_RDB_PFCMACDAL 0x19210
#define WX_RDB_PFCMACDAH 0x19214
+/* ring assignment */
+#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4))
+#define WX_RDB_PL_CFG_L4HDR BIT(1)
+#define WX_RDB_PL_CFG_L3HDR BIT(2)
+#define WX_RDB_PL_CFG_L2HDR BIT(3)
+#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
+#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
/******************************* PSR Registers *******************************/
/* psr control */
@@ -97,10 +127,24 @@
#define WX_PSR_CTL_MO_SHIFT 5
#define WX_PSR_CTL_MO (0x3 << WX_PSR_CTL_MO_SHIFT)
#define WX_PSR_CTL_TPE BIT(4)
+#define WX_PSR_MAX_SZ 0x15020
+#define WX_PSR_VLAN_CTL 0x15088
+#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */
+#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */
/* mcasst/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+/* VM L2 control */
+#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
+#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */
+#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */
+#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */
+#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */
+#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */
+#define WX_PSR_VM_L2CTL_BAM BIT(11) /* accept broadcast packets */
+#define WX_PSR_VM_L2CTL_MPE BIT(12) /* multicast promiscuous */
+
/* Management */
#define WX_PSR_MNG_FLEX_SEL 0x1582C
#define WX_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16))
@@ -122,6 +166,27 @@
#define WX_PSR_MAC_SWC_IDX 0x16210
#define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU
+/********************************* RSEC **************************************/
+/* general rsec */
+#define WX_RSC_CTL 0x17000
+#define WX_RSC_CTL_SAVE_MAC_ERR BIT(6)
+#define WX_RSC_CTL_CRC_STRIP BIT(2)
+#define WX_RSC_CTL_RX_DIS BIT(1)
+#define WX_RSC_ST 0x17004
+#define WX_RSC_ST_RSEC_RDY BIT(0)
+
+/****************************** TDB ******************************************/
+#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4))
+#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+
+/****************************** TSEC *****************************************/
+/* Security Control Registers */
+#define WX_TSC_CTL 0x1D000
+#define WX_TSC_CTL_TX_DIS BIT(1)
+#define WX_TSC_CTL_TSEC_DIS BIT(0)
+#define WX_TSC_BUF_AE 0x1D00C
+#define WX_TSC_BUF_AE_THR GENMASK(9, 0)
+
/************************************** MNG ********************************/
#define WX_MNG_SWFW_SYNC 0x1E008
#define WX_MNG_SWFW_SYNC_SW_MB BIT(2)
@@ -135,6 +200,7 @@
#define WX_MAC_TX_CFG 0x11000
#define WX_MAC_TX_CFG_TE BIT(0)
#define WX_MAC_TX_CFG_SPEED_MASK GENMASK(30, 29)
+#define WX_MAC_TX_CFG_SPEED_10G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 0)
#define WX_MAC_TX_CFG_SPEED_1G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 3)
#define WX_MAC_RX_CFG 0x11004
#define WX_MAC_RX_CFG_RE BIT(0)
@@ -151,10 +217,34 @@
/* Interrupt Registers */
#define WX_BME_CTL 0x12020
#define WX_PX_MISC_IC 0x100
+#define WX_PX_MISC_ICS 0x104
+#define WX_PX_MISC_IEN 0x108
+#define WX_PX_INTA 0x110
+#define WX_PX_GPIE 0x118
+#define WX_PX_GPIE_MODEL BIT(0)
+#define WX_PX_IC 0x120
#define WX_PX_IMS(_i) (0x140 + (_i) * 4)
+#define WX_PX_IMC(_i) (0x150 + (_i) * 4)
+#define WX_PX_ISB_ADDR_L 0x160
+#define WX_PX_ISB_ADDR_H 0x164
#define WX_PX_TRANSACTION_PENDING 0x168
+#define WX_PX_ITRSEL 0x180
+#define WX_PX_ITR(_i) (0x200 + (_i) * 4)
+#define WX_PX_ITR_CNT_WDIS BIT(31)
+#define WX_PX_MISC_IVAR 0x4FC
+#define WX_PX_IVAR(_i) (0x500 + (_i) * 4)
+
+#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+#define WX_7K_ITR 595
+#define WX_12K_ITR 336
+#define WX_SP_MAX_EITR 0x00000FF8U
+#define WX_EM_MAX_EITR 0x00007FFCU
/* transmit DMA Registers */
+#define WX_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40))
+#define WX_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40))
+#define WX_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40))
+#define WX_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40))
#define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40))
/* Transmit Config masks */
#define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */
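
The ITR defines above (WX_PX_ITR, WX_PX_ITR_CNT_WDIS, and the
WX_SP_MAX_EITR/WX_EM_MAX_EITR bounds) shape the per-vector throttle write.
A hedged sketch of that write (the helper name is an assumption; only the
mask-then-set pattern is implied by the defines):

/* Sketch, assuming sparrow (txgbe-class) limits; emerald parts would
 * mask with WX_EM_MAX_EITR instead.
 */
static void example_write_eitr(struct wx *wx, struct wx_q_vector *q_vector)
{
	u32 itr_reg = q_vector->itr & WX_SP_MAX_EITR;

	/* keep the hardware counter from re-arming mid-update */
	itr_reg |= WX_PX_ITR_CNT_WDIS;
	wr32(wx, WX_PX_ITR(q_vector->v_idx), itr_reg);
}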
@@ -164,8 +254,22 @@
#define WX_PX_TR_CFG_THRE_SHIFT 8
/* Receive DMA Registers */
+#define WX_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40))
+#define WX_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40))
+#define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40))
+#define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40))
#define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40))
/* PX_RR_CFG bit definitions */
+#define WX_PX_RR_CFG_SPLIT_MODE BIT(26)
+#define WX_PX_RR_CFG_RR_THER_SHIFT 16
+#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12)
+#define WX_PX_RR_CFG_RR_BUF_SZ GENMASK(11, 8)
+#define WX_PX_RR_CFG_BHDRSIZE_SHIFT 6 /* 64-byte resolution (>> 6)
+ * placed at bit 12 (<< 12)
+ * = net shift (<< 6)
+ */
+#define WX_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* 1 KB resolution (>> 10) at bit 8 (<< 8) = (>> 2) */
+#define WX_PX_RR_CFG_RR_SIZE_SHIFT 1
#define WX_PX_RR_CFG_RR_EN BIT(0)
/* Number of 80-microsecond intervals we wait for PCI Express master disable */
@@ -193,8 +297,46 @@
#define WX_MAC_STATE_MODIFIED 0x2
#define WX_MAC_STATE_IN_USE 0x4
+#define WX_MAX_RXD 8192
+#define WX_MAX_TXD 8192
+
+/* Supported Rx Buffer Sizes */
+#define WX_RXBUFFER_256 256 /* Used for skb receive header */
+#define WX_RXBUFFER_2K 2048
+#define WX_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+
+#if MAX_SKB_FRAGS < 8
+#define WX_RX_BUFSZ ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024)
+#else
+#define WX_RX_BUFSZ WX_RXBUFFER_2K
+#endif
+
+#define WX_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define WX_MAX_DATA_PER_TXD BIT(14)
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
+/* Ether Types */
+#define WX_ETH_P_CNM 0x22E7
+
#define WX_CFG_PORT_ST 0x14404
+/******************* Receive Descriptor bit definitions **********************/
+#define WX_RXD_STAT_DD BIT(0) /* Done */
+#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */
+
+#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */
+
+/*********************** Transmit Descriptor Config Masks ****************/
+#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */
+#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */
+#define WX_TXD_PAYLEN_SHIFT 13 /* Desc PAYLEN shift */
+#define WX_TXD_EOP BIT(24) /* End of Packet */
+#define WX_TXD_IFCS BIT(25) /* Insert FCS */
+#define WX_TXD_RS BIT(27) /* Report Status */
+
/* Host Interface Command Structures */
struct wx_hic_hdr {
u8 cmd;
@@ -270,9 +412,12 @@ struct wx_mac_info {
bool set_lben;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
+ u32 mta_shadow[128];
s32 mc_filter_type;
u32 mcft_size;
u32 num_rar_entries;
+ u32 rx_pb_size;
+ u32 tx_pb_size;
u32 max_tx_queues;
u32 max_rx_queues;
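
The new mta_shadow[128] array mirrors the multicast hash table: mcft_size
(128) words of 32 bits give 4096 hash buckets. A sketch of bucket selection
for a 12-bit hash, modeled on the ixgbe-style MTA layout (the helper itself
is hypothetical):

static void example_mta_set(struct wx *wx, u32 hash)
{
	u32 word = (hash >> 5) & (wx->mac.mcft_size - 1);	/* hash[11:5] */
	u32 bit = hash & 0x1f;					/* hash[4:0] */

	wx->mac.mta_shadow[word] |= BIT(bit);
}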
@@ -312,6 +457,161 @@ enum wx_reset_type {
WX_GLOBAL_RESET
};
+struct wx_cb {
+ dma_addr_t dma;
+ u16 append_cnt; /* number of skbs appended */
+ bool page_released;
+ bool dma_released;
+};
+
+#define WX_CB(skb) ((struct wx_cb *)(skb)->cb)
+
+/* Transmit Descriptor */
+union wx_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor */
+union wx_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define WX_RX_DESC(R, i) \
+ (&(((union wx_rx_desc *)((R)->desc))[i]))
+#define WX_TX_DESC(R, i) \
+ (&(((union wx_tx_desc *)((R)->desc))[i]))
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct wx_tx_buffer {
+ union wx_tx_desc *next_to_watch;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct wx_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ dma_addr_t page_dma;
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
+};
+
+struct wx_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+/* iterator for handling rings in ring container */
+#define wx_for_each_ring(posm, headm) \
+ for (posm = (headm).ring; posm; posm = posm->next)
+
+struct wx_ring_container {
+ struct wx_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+struct wx_ring {
+ struct wx_ring *next; /* pointer to next ring in q_vector */
+ struct wx_q_vector *q_vector; /* backpointer to host q_vector */
+ struct net_device *netdev; /* netdev ring belongs to */
+ struct device *dev; /* device for DMA mapping */
+ struct page_pool *page_pool;
+ void *desc; /* descriptor ring memory */
+ union {
+ struct wx_tx_buffer *tx_buffer_info;
+ struct wx_rx_buffer *rx_buffer_info;
+ };
+ u8 __iomem *tail;
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
+
+ u16 count; /* number of descriptors */
+
+ u8 queue_index; /* needed for multiqueue queue management */
+ u8 reg_idx; /* holds the special value that gets
+ * the hardware register offset
+ * associated with this ring, which is
+ * different for DCB and RSS modes
+ */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
+
+ struct wx_queue_stats stats;
+ struct u64_stats_sync syncp;
+} ____cacheline_internodealigned_in_smp;
+
+struct wx_q_vector {
+ struct wx *wx;
+ int cpu; /* CPU for DCA */
+ int numa_node;
+ u16 v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring
+ */
+ u16 itr; /* Interrupt throttle rate written to EITR */
+ struct wx_ring_container rx, tx;
+ struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
+ char name[IFNAMSIZ + 17];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct wx_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+enum wx_isb_idx {
+ WX_ISB_HEADER,
+ WX_ISB_MISC,
+ WX_ISB_VEC0,
+ WX_ISB_VEC1,
+ WX_ISB_MAX
+};
+
struct wx {
u8 __iomem *hw_addr;
struct pci_dev *pdev;
@@ -331,6 +631,7 @@ struct wx {
u16 oem_svid;
u16 msg_enable;
bool adapter_stopped;
+ u16 tpid[8];
char eeprom_id[32];
enum wx_reset_type reset_type;
@@ -360,6 +661,18 @@ struct wx {
u32 tx_ring_count;
u32 rx_ring_count;
+ struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
+ struct wx_ring *rx_ring[64];
+ struct wx_q_vector *q_vector[64];
+
+ unsigned int queues_per_pool;
+ struct msix_entry *msix_entries;
+
+ /* misc interrupt status block */
+ dma_addr_t isb_dma;
+ u32 *isb_mem;
+ u32 isb_tag[WX_ISB_MAX];
+
#define WX_MAX_RETA_ENTRIES 128
u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
@@ -371,6 +684,7 @@ struct wx {
};
#define WX_INTR_ALL (~0ULL)
+#define WX_INTR_Q(i) BIT(i)
/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
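
The descriptor unions and status bits above drive the Rx clean loop:
hardware sets WX_RXD_STAT_DD in the writeback area once it is done with a
descriptor, and WX_RXD_STAT_EOP when the frame ends in that buffer. An
illustrative check (the real loop lives in wx_lib.c; this helper is not
part of the patch):

static bool example_rx_desc_done(struct wx_ring *ring, u16 idx)
{
	union wx_rx_desc *rx_desc = WX_RX_DESC(ring, idx);
	u32 status = le32_to_cpu(rx_desc->wb.upper.status_error);

	if (!(status & WX_RXD_STAT_DD))
		return false;			/* hardware still owns it */

	return !!(status & WX_RXD_STAT_EOP);	/* frame completes here */
}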
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index ed52f80b5475..f94d415daf3c 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -13,6 +13,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_lib.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"
@@ -112,6 +113,9 @@ static int ngbe_sw_init(struct wx *wx)
wx->mac.num_rar_entries = NGBE_RAR_ENTRIES;
wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES;
wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES;
+ wx->mac.mcft_size = NGBE_MC_TBL_SIZE;
+ wx->mac.rx_pb_size = NGBE_RX_PB_SIZE;
+ wx->mac.tx_pb_size = NGBE_TDB_PB_SZ;
/* PCI config space info */
err = wx_sw_init(wx);
@@ -148,27 +152,211 @@ static int ngbe_sw_init(struct wx *wx)
return 0;
}
+/**
+ * ngbe_irq_enable - Enable default interrupt generation settings
+ * @wx: board private structure
+ * @queues: also enable all queue interrupts
+ **/
+static void ngbe_irq_enable(struct wx *wx, bool queues)
+{
+ u32 mask;
+
+ /* enable misc interrupt */
+ mask = NGBE_PX_MISC_IEN_MASK;
+
+ wr32(wx, WX_GPIO_DDR, WX_GPIO_DDR_0);
+ wr32(wx, WX_GPIO_INTEN, WX_GPIO_INTEN_0 | WX_GPIO_INTEN_1);
+ wr32(wx, WX_GPIO_INTTYPE_LEVEL, 0x0);
+ wr32(wx, WX_GPIO_POLARITY, wx->gpio_ctrl ? 0 : 0x3);
+
+ wr32(wx, WX_PX_MISC_IEN, mask);
+
+ /* mask interrupt */
+ if (queues)
+ wx_intr_enable(wx, NGBE_INTR_ALL);
+ else
+ wx_intr_enable(wx, NGBE_INTR_MISC(wx));
+}
+
+/**
+ * ngbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * re-enable the interrupt that we masked before the EICR read.
+ */
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* would disable interrupts here but it is auto disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+
+ /* re-enable the original interrupt state, no lsc, no queues */
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ngbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * ngbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ngbe_request_msix_irqs(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ err = request_irq(wx->msix_entries[vector].vector,
+ ngbe_msix_other, 0, netdev->name, wx);
+
+ if (err) {
+ wx_err(wx, "request_irq for msix_other failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+
+/**
+ * ngbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ngbe_request_irq(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ if (pdev->msix_enabled)
+ err = ngbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(pdev->irq, ngbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(pdev->irq, ngbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
static void ngbe_disable_device(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ u32 i;
+ /* disable all enabled rx queues */
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ wx_disable_rx_queue(wx, wx->rx_ring[i]);
/* disable receives */
wx_disable_rx(wx);
+ wx_napi_disable_all(wx);
+ netif_tx_stop_all_queues(netdev);
netif_tx_disable(netdev);
if (wx->gpio_ctrl)
ngbe_sfp_modules_txrx_powerctl(wx, false);
+ wx_irq_disable(wx);
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ u8 reg_idx = wx->tx_ring[i]->reg_idx;
+
+ wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
+ }
}
static void ngbe_down(struct wx *wx)
{
phy_stop(wx->phydev);
ngbe_disable_device(wx);
+ wx_clean_all_tx_rings(wx);
+ wx_clean_all_rx_rings(wx);
}
static void ngbe_up(struct wx *wx)
{
+ wx_configure_vectors(wx);
+
+ /* make sure to complete pre-operations */
+ smp_mb__before_atomic();
+ wx_napi_enable_all(wx);
+ /* enable transmits */
+ netif_tx_start_all_queues(wx->netdev);
+
+ /* clear any pending interrupts, may auto mask */
+ rd32(wx, WX_PX_IC);
+ rd32(wx, WX_PX_MISC_IC);
+ ngbe_irq_enable(wx, true);
if (wx->gpio_ctrl)
ngbe_sfp_modules_txrx_powerctl(wx, true);
+
phy_start(wx->phydev);
}
@@ -187,12 +375,39 @@ static int ngbe_open(struct net_device *netdev)
int err;
wx_control_hw(wx, true);
- err = ngbe_phy_connect(wx);
+
+ err = wx_setup_resources(wx);
if (err)
return err;
+
+ wx_configure(wx);
+
+ err = ngbe_request_irq(wx);
+ if (err)
+ goto err_free_resources;
+
+ err = ngbe_phy_connect(wx);
+ if (err)
+ goto err_free_irq;
+
+ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+ if (err)
+ goto err_dis_phy;
+
+ err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
+ if (err)
+ goto err_dis_phy;
+
ngbe_up(wx);
return 0;
+err_dis_phy:
+ phy_disconnect(wx->phydev);
+err_free_irq:
+ wx_free_irq(wx);
+err_free_resources:
+ wx_free_resources(wx);
+ return err;
}
/**
@@ -211,18 +426,14 @@ static int ngbe_close(struct net_device *netdev)
struct wx *wx = netdev_priv(netdev);
ngbe_down(wx);
+ wx_free_irq(wx);
+ wx_free_resources(wx);
phy_disconnect(wx->phydev);
wx_control_hw(wx, false);
return 0;
}
-static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
-{
- return NETDEV_TX_OK;
-}
-
static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct wx *wx = pci_get_drvdata(pdev);
@@ -258,9 +469,11 @@ static void ngbe_shutdown(struct pci_dev *pdev)
static const struct net_device_ops ngbe_netdev_ops = {
.ndo_open = ngbe_open,
.ndo_stop = ngbe_close,
- .ndo_start_xmit = ngbe_xmit_frame,
+ .ndo_start_xmit = wx_xmit_frame,
+ .ndo_set_rx_mode = wx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
+ .ndo_get_stats64 = wx_get_stats64,
};
/**
@@ -336,6 +549,17 @@ static int ngbe_probe(struct pci_dev *pdev,
netdev->netdev_ops = &ngbe_netdev_ops;
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_SG;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features |
+ NETIF_F_RXALL;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
wx->bd_number = func_nums;
/* setup the private structure */
@@ -411,10 +635,14 @@ static int ngbe_probe(struct pci_dev *pdev,
eth_hw_addr_set(netdev, wx->mac.perm_addr);
wx_mac_set_default_filter(wx, wx->mac.perm_addr);
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_mac_table;
+
/* phy Interface Configuration */
err = ngbe_mdio_init(wx);
if (err)
- goto err_free_mac_table;
+ goto err_clear_interrupt_scheme;
err = register_netdev(netdev);
if (err)
@@ -431,6 +659,8 @@ static int ngbe_probe(struct pci_dev *pdev,
err_register:
wx_control_hw(wx, false);
+err_clear_interrupt_scheme:
+ wx_clear_interrupt_scheme(wx);
err_free_mac_table:
kfree(wx->mac_table);
err_pci_release_regions:
@@ -462,6 +692,7 @@ static void ngbe_remove(struct pci_dev *pdev)
pci_select_bars(pdev, IORESOURCE_MEM));
kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
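
ngbe_intr() above relies on the interrupt status block: isb_mem is DMA
memory the NIC writes status words into, indexed by enum wx_isb_idx, so a
vector's cause can be read without an MMIO access. A reduced sketch of that
read (the real accessor is wx_misc_isb(), declared in wx_lib.h; this
version ignores the tag bookkeeping):

static u32 example_read_isb(struct wx *wx, enum wx_isb_idx idx)
{
	u32 val = wx->isb_mem[idx];	/* device-written status word */

	wx->isb_mem[idx] = 0;		/* re-arm the slot, as ngbe_intr() does */
	return val;
}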
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index fd71260f73de..a2351349785e 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -90,6 +90,20 @@ enum NGBE_MSCA_CMD_value {
#define NGBE_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */
#define NGBE_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */
+/* Extended Interrupt Enable Set */
+#define NGBE_PX_MISC_IEN_DEV_RST BIT(10)
+#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
+#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
+#define NGBE_PX_MISC_IEN_GPIO BIT(26)
+#define NGBE_PX_MISC_IEN_MASK ( \
+ NGBE_PX_MISC_IEN_DEV_RST | \
+ NGBE_PX_MISC_IEN_ETH_LK | \
+ NGBE_PX_MISC_IEN_INT_ERR | \
+ NGBE_PX_MISC_IEN_GPIO)
+
+#define NGBE_INTR_ALL 0x1FF
+#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+
#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
#define NGBE_CFG_LAN_SPEED 0x14440
#define NGBE_CFG_PORT_ST 0x14404
@@ -120,6 +134,10 @@ enum NGBE_MSCA_CMD_value {
#define NGBE_ETH_LENGTH_OF_ADDRESS 6
#define NGBE_MAX_MSIX_VECTORS 0x09
#define NGBE_RAR_ENTRIES 32
+#define NGBE_RX_PB_SIZE 42
+#define NGBE_MC_TBL_SIZE 128
+#define NGBE_TDB_PB_SZ (20 * 1024) /* 20KB Packet Buffer */
+#define NGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
/* TX/RX descriptor defines */
#define NGBE_DEFAULT_TXD 512 /* default ring size */
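
NGBE_INTR_MISC() places the misc vector immediately above the queue
vectors, so with the maximum 8 queue vectors the queue bits are 0..7, the
misc bit is BIT(8), and NGBE_INTR_ALL (0x1FF) covers exactly those nine
bits. A worked illustration (the helper is hypothetical):

static void example_ngbe_enable_all(struct wx *wx)
{
	u32 misc = NGBE_INTR_MISC(wx);	/* BIT(8) = 0x100 with 8 q_vectors */
	u32 queues = misc - 1;		/* 0x0ff: one bit per queue vector */

	wx_intr_enable(wx, queues | misc);	/* == NGBE_INTR_ALL (0x1ff) */
}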
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index aa4d09df3b01..094df377726b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -11,6 +11,7 @@
#include <net/ip.h>
#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
@@ -72,9 +73,177 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
+/**
+ * txgbe_irq_enable - Enable default interrupt generation settings
+ * @wx: pointer to private structure
+ * @queues: enable irqs for queues
+ **/
+static void txgbe_irq_enable(struct wx *wx, bool queues)
+{
+ /* unmask interrupt */
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ if (queues)
+ wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
+}
+
+/**
+ * txgbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * re-enable the interrupt that we masked before the ICR read.
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* would disable interrupts here but it is auto disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ /* re-enable link (maybe) and non-queue interrupts, no flush.
+ * txgbe_poll will re-enable the queue interrupts
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+
+ /* re-enable the original interrupt state */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * Allocate MSI-X vectors and request interrupts from the kernel.
+ **/
+static int txgbe_request_msix_irqs(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ err = request_irq(wx->msix_entries[vector].vector,
+ txgbe_msix_other, 0, netdev->name, wx);
+ if (err) {
+ wx_err(wx, "request_irq for msix_other failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+
+/**
+ * txgbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int txgbe_request_irq(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ if (pdev->msix_enabled)
+ err = txgbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
static void txgbe_up_complete(struct wx *wx)
{
+ u32 reg;
+
wx_control_hw(wx, true);
+ wx_configure_vectors(wx);
+
+ /* make sure to complete pre-operations */
+ smp_mb__before_atomic();
+ wx_napi_enable_all(wx);
+
+ /* clear any pending interrupts, may auto mask */
+ rd32(wx, WX_PX_IC);
+ rd32(wx, WX_PX_MISC_IC);
+ txgbe_irq_enable(wx, true);
+
+ /* Configure MAC Rx and Tx when link is up */
+ reg = rd32(wx, WX_MAC_RX_CFG);
+ wr32(wx, WX_MAC_RX_CFG, reg);
+ wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+ reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+ reg = rd32(wx, WX_MAC_TX_CFG);
+ wr32(wx, WX_MAC_TX_CFG, (reg & ~WX_MAC_TX_CFG_SPEED_MASK) | WX_MAC_TX_CFG_SPEED_10G);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(wx->netdev);
+ netif_carrier_on(wx->netdev);
}
static void txgbe_reset(struct wx *wx)
@@ -96,14 +265,24 @@ static void txgbe_reset(struct wx *wx)
static void txgbe_disable_device(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ u32 i;
wx_disable_pcie_master(wx);
/* disable receives */
wx_disable_rx(wx);
+ /* disable all enabled rx queues */
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ wx_disable_rx_queue(wx, wx->rx_ring[i]);
+
+ netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ wx_irq_disable(wx);
+ wx_napi_disable_all(wx);
+
if (wx->bus.func < 2)
wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
else
@@ -116,6 +295,13 @@ static void txgbe_disable_device(struct wx *wx)
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
}
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ u8 reg_idx = wx->tx_ring[i]->reg_idx;
+
+ wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
+ }
+
/* Disable the Tx DMA engine */
wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0);
}
@@ -124,6 +310,9 @@ static void txgbe_down(struct wx *wx)
{
txgbe_disable_device(wx);
txgbe_reset(wx);
+
+ wx_clean_all_tx_rings(wx);
+ wx_clean_all_rx_rings(wx);
}
/**
@@ -132,12 +321,15 @@ static void txgbe_down(struct wx *wx)
**/
static int txgbe_sw_init(struct wx *wx)
{
+ u16 msix_count = 0;
int err;
wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
+ wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
+ wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;
/* PCI config space info */
err = wx_sw_init(wx);
@@ -156,6 +348,25 @@ static int txgbe_sw_init(struct wx *wx)
break;
}
+ /* Set common capability flags and settings */
+ wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS;
+ err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS);
+ if (err)
+ wx_err(wx, "Do not support MSI-X\n");
+ wx->mac.max_msix_vectors = msix_count;
+
+ /* enable itr by default in dynamic mode */
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
+
+ /* set default ring sizes */
+ wx->tx_ring_count = TXGBE_DEFAULT_TXD;
+ wx->rx_ring_count = TXGBE_DEFAULT_RXD;
+
+ /* set default work limits */
+ wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
+ wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+
return 0;
}
@@ -171,10 +382,39 @@ static int txgbe_sw_init(struct wx *wx)
static int txgbe_open(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ int err;
+
+ err = wx_setup_resources(wx);
+ if (err)
+ goto err_reset;
+
+ wx_configure(wx);
+
+ err = txgbe_request_irq(wx);
+ if (err)
+ goto err_free_isb;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+ if (err)
+ goto err_free_irq;
+
+ err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
+ if (err)
+ goto err_free_irq;
txgbe_up_complete(wx);
return 0;
+
+err_free_irq:
+ wx_free_irq(wx);
+err_free_isb:
+ wx_free_isb_resources(wx);
+err_reset:
+ txgbe_reset(wx);
+
+ return err;
}
/**
@@ -187,6 +427,7 @@ static int txgbe_open(struct net_device *netdev)
static void txgbe_close_suspend(struct wx *wx)
{
txgbe_disable_device(wx);
+ wx_free_resources(wx);
}
/**
@@ -205,6 +446,8 @@ static int txgbe_close(struct net_device *netdev)
struct wx *wx = netdev_priv(netdev);
txgbe_down(wx);
+ wx_free_irq(wx);
+ wx_free_resources(wx);
wx_control_hw(wx, false);
return 0;
@@ -240,18 +483,14 @@ static void txgbe_shutdown(struct pci_dev *pdev)
}
}
-static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
-{
- return NETDEV_TX_OK;
-}
-
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
- .ndo_start_xmit = txgbe_xmit_frame,
+ .ndo_start_xmit = wx_xmit_frame,
+ .ndo_set_rx_mode = wx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
+ .ndo_get_stats64 = wx_get_stats64,
};
/**
@@ -354,6 +593,16 @@ static int txgbe_probe(struct pci_dev *pdev,
}
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_SG;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features | NETIF_F_RXALL;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
/* make sure the EEPROM is good */
err = txgbe_validate_eeprom_checksum(wx, NULL);
@@ -367,6 +616,10 @@ static int txgbe_probe(struct pci_dev *pdev,
eth_hw_addr_set(netdev, wx->mac.perm_addr);
wx_mac_set_default_filter(wx, wx->mac.perm_addr);
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_mac_table;
+
/* Save off EEPROM version number and Option Rom version which
* together make a unique identify for the eeprom
*/
@@ -411,6 +664,8 @@ static int txgbe_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, wx);
+ netif_tx_stop_all_queues(netdev);
+
/* calculate the expected PCIe bandwidth required for optimal
* performance. Note that some older parts will never have enough
* bandwidth due to being older generation PCIe parts. We clamp these
@@ -435,6 +690,7 @@ static int txgbe_probe(struct pci_dev *pdev,
return 0;
err_release_hw:
+ wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
err_free_mac_table:
kfree(wx->mac_table);
@@ -468,6 +724,7 @@ static void txgbe_remove(struct pci_dev *pdev)
pci_select_bars(pdev, IORESOURCE_MEM));
kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index cbd705a9f4bd..563ea51deca6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -67,6 +67,7 @@
#define TXGBE_PBANUM1_PTR 0x06
#define TXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
@@ -76,6 +77,26 @@
#define TXGBE_SP_MAX_RX_QUEUES 128
#define TXGBE_SP_RAR_ENTRIES 128
#define TXGBE_SP_MC_TBL_SIZE 128
+#define TXGBE_SP_RX_PB_SIZE 512
+#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+#define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
+
+/* TX/RX descriptor defines */
+#define TXGBE_DEFAULT_TXD 512
+#define TXGBE_DEFAULT_TX_WORK 256
+
+#if (PAGE_SIZE < 8192)
+#define TXGBE_DEFAULT_RXD 512
+#define TXGBE_DEFAULT_RX_WORK 256
+#else
+#define TXGBE_DEFAULT_RXD 256
+#define TXGBE_DEFAULT_RX_WORK 128
+#endif
+
+#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
+
+#define TXGBE_MAX_EITR GENMASK(11, 3)
extern char txgbe_driver_name[];
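
TXGBE_INTR_QALL() derives the all-queues mask from the misc bit:
BIT(n) - 1 sets exactly bits 0..n-1, so the two masks can never overlap.
A worked illustration, assuming num_q_vectors = 4 (the helper is
hypothetical):

static void example_txgbe_enable_all(struct wx *wx)
{
	wx_intr_enable(wx, TXGBE_INTR_MISC(wx));	/* BIT(4) = 0x10 */
	wx_intr_enable(wx, TXGBE_INTR_QALL(wx));	/* 0x0f: queue bits */
}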
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9352dad58996..79f4e13620a4 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -987,9 +987,6 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
void netvsc_dma_unmap(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet)
{
- u32 page_count = packet->cp_partial ?
- packet->page_buf_cnt - packet->rmsg_pgcnt :
- packet->page_buf_cnt;
int i;
if (!hv_is_isolation_supported())
@@ -998,7 +995,7 @@ void netvsc_dma_unmap(struct hv_device *hv_dev,
if (!packet->dma_range)
return;
- for (i = 0; i < page_count; i++)
+ for (i = 0; i < packet->page_buf_cnt; i++)
dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
packet->dma_range[i].mapping_size,
DMA_TO_DEVICE);
@@ -1028,9 +1025,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
- u32 page_count = packet->cp_partial ?
- packet->page_buf_cnt - packet->rmsg_pgcnt :
- packet->page_buf_cnt;
+ u32 page_count = packet->page_buf_cnt;
dma_addr_t dma;
int i;
@@ -1039,7 +1034,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
packet->dma_range = kcalloc(page_count,
sizeof(*packet->dma_range),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!packet->dma_range)
return -ENOMEM;
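
The GFP_KERNEL -> GFP_ATOMIC switch matters because netvsc_dma_map() runs
on the transmit path, where sleeping is not allowed; GFP_ATOMIC fails fast
instead of scheduling. A reduced illustration of the constraint
(example_dma_range stands in for the real struct hv_dma_range):

struct example_dma_range {	/* stand-in for struct hv_dma_range */
	dma_addr_t dma;
	u32 mapping_size;
};

/* May be called from softirq/xmit context: must not sleep. */
static struct example_dma_range *example_alloc_ranges(u32 page_count)
{
	return kcalloc(page_count, sizeof(struct example_dma_range),
		       GFP_ATOMIC);
}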
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index bea2da1c4c51..2cb1710f6ac3 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -182,6 +182,41 @@ static bool gsi_channel_initialized(struct gsi_channel *channel)
return !!channel->gsi;
}
+/* Encode the channel protocol for the CH_C_CNTXT_0 register */
+static u32 ch_c_cntxt_0_type_encode(enum ipa_version version,
+ enum gsi_channel_type type)
+{
+ u32 val;
+
+ val = u32_encode_bits(type, CHTYPE_PROTOCOL_FMASK);
+ if (version < IPA_VERSION_4_5)
+ return val;
+
+ type >>= hweight32(CHTYPE_PROTOCOL_FMASK);
+
+ return val | u32_encode_bits(type, CHTYPE_PROTOCOL_MSB_FMASK);
+}
+
+/* Encode a channel ring buffer length for the CH_C_CNTXT_1 register */
+static u32 ch_c_cntxt_1_length_encode(enum ipa_version version, u32 length)
+{
+ if (version < IPA_VERSION_4_9)
+ return u32_encode_bits(length, GENMASK(15, 0));
+
+ return u32_encode_bits(length, GENMASK(19, 0));
+}
+
+/* Encode the length of the event channel ring buffer for the
+ * EV_CH_E_CNTXT_1 register.
+ */
+static u32 ev_ch_e_cntxt_1_length_encode(enum ipa_version version, u32 length)
+{
+ if (version < IPA_VERSION_4_9)
+ return u32_encode_bits(length, GENMASK(15, 0));
+
+ return u32_encode_bits(length, GENMASK(19, 0));
+}
+
/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
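
The two length encoders above differ only in field width: before IPA v4.9
the ring-length field is GENMASK(15, 0), capping rings at 65535 bytes,
while v4.9+ widens it to GENMASK(19, 0). A worked illustration
(example_ring_len is not part of the patch):

static u32 example_ring_len(enum ipa_version version)
{
	/* A 64 KB ring (4096 16-byte TREs) encodes only on v4.9+;
	 * the pre-4.9 16-bit field cannot represent 65536.
	 */
	return ch_c_cntxt_1_length_encode(version, 4096 * 16);
}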
@@ -191,12 +226,12 @@ static void gsi_irq_type_update(struct gsi *gsi, u32 val)
static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | type_id);
}
static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~type_id);
}
/* Event ring commands are performed one at a time. Their completion
@@ -292,19 +327,19 @@ static void gsi_irq_enable(struct gsi *gsi)
/* Global interrupts include hardware error reports. Enable
* that so we can at least report the error should it occur.
*/
- iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));
+ iowrite32(ERROR_INT, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GLOB_EE);
/* General GSI interrupts are reported to all EEs; if they occur
* they are unrecoverable (without reset). A breakpoint interrupt
* also exists, but we don't support that. We want to be notified
* of errors so we can report them, even if they can't be handled.
*/
- val = BIT(BUS_ERROR);
- val |= BIT(CMD_FIFO_OVRFLOW);
- val |= BIT(MCS_STACK_OVRFLOW);
+ val = BUS_ERROR;
+ val |= CMD_FIFO_OVRFLOW;
+ val |= MCS_STACK_OVRFLOW;
iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GENERAL);
}
/* Disable all GSI interrupt types */
@@ -676,7 +711,7 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
size = ring->count * GSI_RING_ELEMENT_SIZE;
- val = ev_r_length_encoded(gsi->version, size);
+ val = ev_ch_e_cntxt_1_length_encode(gsi->version, size);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
/* The context 2 and 3 registers store the low-order and
@@ -765,14 +800,14 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
u32 val;
/* We program all channels as GPI type/protocol */
- val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
+ val = ch_c_cntxt_0_type_encode(gsi->version, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
val |= CHTYPE_DIR_FMASK;
val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
- val = r_length_encoded(gsi->version, size);
+ val = ch_c_cntxt_1_length_encode(gsi->version, size);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
/* The context 2 and 3 registers store the low-order and
@@ -1195,15 +1230,15 @@ static void gsi_isr_glob_ee(struct gsi *gsi)
val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
- if (val & BIT(ERROR_INT))
+ if (val & ERROR_INT)
gsi_isr_glob_err(gsi);
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
- val &= ~BIT(ERROR_INT);
+ val &= ~ERROR_INT;
- if (val & BIT(GP_INT1)) {
- val ^= BIT(GP_INT1);
+ if (val & GP_INT1) {
+ val ^= GP_INT1;
gsi_isr_gp_int1(gsi);
}
@@ -1264,19 +1299,19 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
intr_mask ^= gsi_intr;
switch (gsi_intr) {
- case BIT(GSI_CH_CTRL):
+ case GSI_CH_CTRL:
gsi_isr_chan_ctrl(gsi);
break;
- case BIT(GSI_EV_CTRL):
+ case GSI_EV_CTRL:
gsi_isr_evt_ctrl(gsi);
break;
- case BIT(GSI_GLOB_EE):
+ case GSI_GLOB_EE:
gsi_isr_glob_ee(gsi);
break;
- case BIT(GSI_IEOB):
+ case GSI_IEOB:
gsi_isr_ieob(gsi);
break;
- case BIT(GSI_GENERAL):
+ case GSI_GENERAL:
gsi_isr_general(gsi);
break;
default:
@@ -1654,7 +1689,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
* channel), and only from this function. So we enable the GP_INT1
* IRQ type here, and disable it again after the command completes.
*/
- val = BIT(ERROR_INT) | BIT(GP_INT1);
+ val = ERROR_INT | GP_INT1;
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
/* First zero the result code field */
@@ -1666,12 +1701,13 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
+ if (gsi->version >= IPA_VERSION_4_11)
+ val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
/* Disable the GP_INT1 IRQ type again */
- iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ iowrite32(ERROR_INT, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
if (!timeout)
return gsi->result;
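
Because the IRQ-type enums in gsi_reg.h now hold the bit masks themselves
(see the enum changes below), call sites compose them directly instead of
wrapping each value in BIT(), which is what every hunk above converts. A
minimal sketch of the new style (the helper is illustrative):

static u32 example_glob_irq_mask(bool want_gp_int1)
{
	u32 val = ERROR_INT;	/* already BIT(0): a mask, not a position */

	if (want_gp_int1)
		val |= GP_INT1;	/* BIT(1); no BIT() wrapper needed */
	return val;
}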
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 3763359f208f..d171f65d4198 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -62,6 +62,18 @@
/* All other register offsets are relative to gsi->virt */
+#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
+ (0x0001c000 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
+#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
+#define CHTYPE_DIR_FMASK GENMASK(3, 3)
+#define EE_FMASK GENMASK(7, 4)
+#define CHID_FMASK GENMASK(12, 8)
+/* The next field is present for IPA v4.5 and above */
+#define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
+#define ERINDEX_FMASK GENMASK(18, 14)
+#define CHSTATE_FMASK GENMASK(23, 20)
+#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
+
/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */
enum gsi_channel_type {
GSI_CHANNEL_TYPE_MHI = 0x0,
@@ -76,46 +88,9 @@ enum gsi_channel_type {
GSI_CHANNEL_TYPE_11AD = 0x9,
};
-#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
- (0x0001c000 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
-#define CHTYPE_DIR_FMASK GENMASK(3, 3)
-#define EE_FMASK GENMASK(7, 4)
-#define CHID_FMASK GENMASK(12, 8)
-/* The next field is present for IPA v4.5 and above */
-#define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
-#define ERINDEX_FMASK GENMASK(18, 14)
-#define CHSTATE_FMASK GENMASK(23, 20)
-#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
-
-/* Encoded value for CH_C_CNTXT_0 register channel protocol fields */
-static inline u32
-chtype_protocol_encoded(enum ipa_version version, enum gsi_channel_type type)
-{
- u32 val;
-
- val = u32_encode_bits(type, CHTYPE_PROTOCOL_FMASK);
- if (version < IPA_VERSION_4_5)
- return val;
-
- /* Encode upper bit(s) as well */
- type >>= hweight32(CHTYPE_PROTOCOL_FMASK);
- val |= u32_encode_bits(type, CHTYPE_PROTOCOL_MSB_FMASK);
-
- return val;
-}
-
#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
(0x0001c004 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-/* Encoded value for CH_C_CNTXT_1 register R_LENGTH field */
-static inline u32 r_length_encoded(enum ipa_version version, u32 length)
-{
- if (version < IPA_VERSION_4_9)
- return u32_encode_bits(length, GENMASK(15, 0));
- return u32_encode_bits(length, GENMASK(19, 0));
-}
-
#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
(0x0001c008 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
@@ -167,13 +142,6 @@ enum gsi_prefetch_mode {
#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
(0x0001d004 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-/* Encoded value for EV_CH_C_CNTXT_1 register EV_R_LENGTH field */
-static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
-{
- if (version < IPA_VERSION_4_9)
- return u32_encode_bits(length, GENMASK(15, 0));
- return u32_encode_bits(length, GENMASK(19, 0));
-}
#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
(0x0001d008 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
@@ -299,15 +267,25 @@ enum gsi_iram_size {
#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
(0x0001f088 + 0x4000 * GSI_EE_AP)
-/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
+/**
+ * enum gsi_irq_type_id: GSI IRQ types
+ * @GSI_CH_CTRL: Channel allocation, deallocation, etc.
+ * @GSI_EV_CTRL: Event ring allocation, deallocation, etc.
+ * @GSI_GLOB_EE: Global/general event
+ * @GSI_IEOB: Transfer (TRE) completion
+ * @GSI_INTER_EE_CH_CTRL: Remote-issued stop/reset (unused)
+ * @GSI_INTER_EE_EV_CTRL: Remote-issued event reset (unused)
+ * @GSI_GENERAL: General hardware event (bus error, etc.)
+ */
enum gsi_irq_type_id {
- GSI_CH_CTRL = 0x0, /* channel allocation, etc. */
- GSI_EV_CTRL = 0x1, /* event ring allocation, etc. */
- GSI_GLOB_EE = 0x2, /* global/general event */
- GSI_IEOB = 0x3, /* TRE completion */
- GSI_INTER_EE_CH_CTRL = 0x4, /* remote-issued stop/reset (unused) */
- GSI_INTER_EE_EV_CTRL = 0x5, /* remote-issued event reset (unused) */
- GSI_GENERAL = 0x6, /* general-purpose event */
+ GSI_CH_CTRL = BIT(0),
+ GSI_EV_CTRL = BIT(1),
+ GSI_GLOB_EE = BIT(2),
+ GSI_IEOB = BIT(3),
+ GSI_INTER_EE_CH_CTRL = BIT(4),
+ GSI_INTER_EE_EV_CTRL = BIT(5),
+ GSI_GENERAL = BIT(6),
+ /* IRQ types 7-31 (and their bit values) are reserved */
};
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
@@ -343,12 +321,14 @@ enum gsi_irq_type_id {
(0x0001f108 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
(0x0001f110 + 0x4000 * GSI_EE_AP)
-/* Values here are bit positions in the GLOB_IRQ_* registers */
+
+/** enum gsi_global_irq_id: Global GSI interrupt events */
enum gsi_global_irq_id {
- ERROR_INT = 0x0,
- GP_INT1 = 0x1,
- GP_INT2 = 0x2,
- GP_INT3 = 0x3,
+ ERROR_INT = BIT(0),
+ GP_INT1 = BIT(1),
+ GP_INT2 = BIT(2),
+ GP_INT3 = BIT(3),
+ /* Global IRQ types 4-31 (and their bit values) are reserved */
};
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
@@ -357,12 +337,14 @@ enum gsi_global_irq_id {
(0x0001f120 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
(0x0001f128 + 0x4000 * GSI_EE_AP)
-/* Values here are bit positions in the (general) GSI_IRQ_* registers */
-enum gsi_general_id {
- BREAK_POINT = 0x0,
- BUS_ERROR = 0x1,
- CMD_FIFO_OVRFLOW = 0x2,
- MCS_STACK_OVRFLOW = 0x3,
+
+/** enum gsi_general_irq_id: GSI general IRQ conditions */
+enum gsi_general_irq_id {
+ BREAK_POINT = BIT(0),
+ BUS_ERROR = BIT(1),
+ CMD_FIFO_OVRFLOW = BIT(2),
+ MCS_STACK_OVRFLOW = BIT(3),
+ /* General IRQ types 4-31 (and their bit values) are reserved */
};
#define GSI_CNTXT_INTSET_OFFSET \
@@ -372,7 +354,6 @@ enum gsi_general_id {
#define GSI_ERROR_LOG_OFFSET \
(0x0001f200 + 0x4000 * GSI_EE_AP)
-/* Fields below are present for IPA v3.5.1 and above */
#define ERR_ARG3_FMASK GENMASK(3, 0)
#define ERR_ARG2_FMASK GENMASK(7, 4)
#define ERR_ARG1_FMASK GENMASK(11, 8)
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 5372db58b5bd..f3355e040a9e 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -45,7 +45,6 @@ struct ipa_interrupt;
* @interrupt: IPA Interrupt information
* @uc_powered: true if power is active by proxy for microcontroller
* @uc_loaded: true after microcontroller has reported it's ready
- * @reg_addr: DMA address used for IPA register access
* @reg_virt: Virtual address used for IPA register access
* @regs: IPA register definitions
* @mem_addr: DMA address of IPA-local memory space
@@ -97,9 +96,8 @@ struct ipa {
bool uc_powered;
bool uc_loaded;
- dma_addr_t reg_addr;
void __iomem *reg_virt;
- const struct ipa_regs *regs;
+ const struct regs *regs;
dma_addr_t mem_addr;
void *mem_virt;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index bb3dfa9a2bc8..f1419fbd776c 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -94,11 +94,11 @@ struct ipa_cmd_register_write {
/* IPA_CMD_IP_PACKET_INIT */
struct ipa_cmd_ip_packet_init {
- u8 dest_endpoint;
+ u8 dest_endpoint; /* Full 8 bits used for IPA v5.0+ */
u8 reserved[7];
};
-/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
+/* Field mask for ipa_cmd_ip_packet_init dest_endpoint field (unused v5.0+) */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
/* IPA_CMD_DMA_SHARED_MEM */
@@ -157,9 +157,14 @@ static void ipa_cmd_validate_build(void)
BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
- /* Valid endpoint numbers must fit in the IP packet init command */
- BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
- IPA_ENDPOINT_MAX - 1);
+ /* Prior to IPA v5.0, we supported no more than 32 endpoints,
+ * and this was reflected in some 5-bit fields that held
+ * endpoint numbers. Starting with IPA v5.0, the widths of
+ * these fields were extended to 8 bits, meaning up to 256
+ * endpoints. If the driver claims to support more than
+ * that it's an error.
+ */
+ BUILD_BUG_ON(IPA_ENDPOINT_MAX - 1 > U8_MAX);
}
/* Validate a memory region holding a table */
@@ -282,7 +287,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
const char *name;
u32 offset;
@@ -290,8 +295,12 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
- reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
- offset = ipa_reg_offset(reg);
+ if (ipa->version < IPA_VERSION_5_0)
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ else
+ reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
+
+ offset = reg_offset(reg);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -305,7 +314,7 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* fits in the register write command field(s) that must hold it.
*/
reg = ipa_reg(ipa, ENDP_STATUS);
- offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
+ offset = reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -486,8 +495,13 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_init;
- payload->dest_endpoint = u8_encode_bits(endpoint_id,
- IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
+ if (ipa->version < IPA_VERSION_5_0) {
+ payload->dest_endpoint =
+ u8_encode_bits(endpoint_id,
+ IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
+ } else {
+ payload->dest_endpoint = endpoint_id;
+ }
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index ce7f2d6e447e..2ee80ed140b7 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -241,7 +241,7 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (!data->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 buffer_size;
u32 aggr_size;
u32 limit;
@@ -304,7 +304,7 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
rx_config->aggr_hard_limit);
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
- limit = ipa_reg_field_max(reg, BYTE_LIMIT);
+ limit = reg_field_max(reg, BYTE_LIMIT);
if (aggr_size > limit) {
dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
data->endpoint_id, aggr_size, limit);
@@ -447,7 +447,7 @@ static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 field_id;
u32 offset;
bool state;
@@ -460,11 +460,11 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
WARN_ON(ipa->version >= IPA_VERSION_4_0);
reg = ipa_reg(ipa, ENDP_INIT_CTRL);
- offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ offset = reg_n_offset(reg, endpoint->endpoint_id);
val = ioread32(ipa->reg_virt + offset);
field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
- mask = ipa_reg_bit(reg, field_id);
+ mask = reg_bit(reg, field_id);
state = !!(val & mask);
@@ -493,13 +493,13 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
- val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
return !!(val & BIT(endpoint_id % 32));
}
@@ -510,12 +510,12 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
u32 mask = BIT(endpoint_id % 32);
struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
- iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
}
/**
@@ -613,7 +613,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
struct ipa_endpoint *endpoint;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
/* We only reset modem TX endpoints */
@@ -622,7 +622,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
continue;
reg = ipa_reg(ipa, ENDP_STATUS);
- offset = ipa_reg_n_offset(reg, endpoint_id);
+ offset = reg_n_offset(reg, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
@@ -645,7 +645,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_CFG);
@@ -658,7 +658,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
/* Checksum header offset is in 4-byte units */
off = sizeof(struct rmnet_map_header) / sizeof(u32);
- val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
+ val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_UL
@@ -671,26 +671,26 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
} else {
enabled = IPA_CS_OFFLOAD_NONE;
}
- val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
+ val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
/* CS_GEN_QMB_MASTER_SEL is 0 */
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return;
reg = ipa_reg(ipa, ENDP_INIT_NAT);
- val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
+ val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static u32
@@ -716,13 +716,13 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
static u32 ipa_header_size_encode(enum ipa_version version,
- const struct ipa_reg *reg, u32 header_size)
+ const struct reg *reg, u32 header_size)
{
- u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
+ u32 field_max = reg_field_max(reg, HDR_LEN);
u32 val;
/* We know field_max can be used as a mask (2^n - 1) */
- val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
+ val = reg_encode(reg, HDR_LEN, header_size & field_max);
if (version < IPA_VERSION_4_5) {
WARN_ON(header_size > field_max);
return val;
@@ -730,21 +730,21 @@ static u32 ipa_header_size_encode(enum ipa_version version,
/* IPA v4.5 adds a few more most-significant bits */
header_size >>= hweight32(field_max);
- WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
- val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
+ WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
+ val |= reg_encode(reg, HDR_LEN_MSB, header_size);
return val;
}
/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
static u32 ipa_metadata_offset_encode(enum ipa_version version,
- const struct ipa_reg *reg, u32 offset)
+ const struct reg *reg, u32 offset)
{
- u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
+ u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
u32 val;
/* We know field_max can be used as a mask (2^n - 1) */
- val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
+ val = reg_encode(reg, HDR_OFST_METADATA, offset);
if (version < IPA_VERSION_4_5) {
WARN_ON(offset > field_max);
return val;
@@ -752,8 +752,8 @@ static u32 ipa_metadata_offset_encode(enum ipa_version version,
/* IPA v4.5 adds a few more most-significant bits */
offset >>= hweight32(field_max);
- WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
- val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
+ WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
+ val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
return val;
}
@@ -783,7 +783,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_HDR);
@@ -806,13 +806,13 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
off = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
if (version >= IPA_VERSION_4_5)
- off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);
- val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
- val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
+ val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
+ val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
- val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
+ val |= reg_bit(reg, HDR_OFST_METADATA_VALID);
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
@@ -820,7 +820,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
@@ -828,13 +828,13 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
u32 pad_align = endpoint->config.rx.pad_align;
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
- val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */
+ val |= reg_bit(reg, HDR_ENDIANNESS); /* big endian */
/* A QMAP header contains a 6 bit pad field at offset 0.
* The RMNet driver assumes this field is meaningful in
@@ -844,16 +844,16 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
* (although 0) should be ignored.
*/
if (!endpoint->toward_ipa) {
- val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
+ val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
- val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
+ val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
}
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
if (!endpoint->toward_ipa)
- val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
+ val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
@@ -861,25 +861,25 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->config.qmap && !endpoint->toward_ipa) {
- u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
u32 off; /* Field offset within header */
off = offsetof(struct rmnet_map_header, pkt_len);
/* Low bits are in the ENDP_INIT_HDR register */
off >>= hweight32(mask);
- val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
+ val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
u32 offset;
@@ -887,7 +887,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
return; /* Register not valid for TX endpoints */
reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
- offset = ipa_reg_n_offset(reg, endpoint_id);
+ offset = reg_n_offset(reg, endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->config.qmap)
@@ -899,7 +899,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -911,82 +911,90 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
- val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
- val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
+ val = reg_encode(reg, ENDP_MODE, IPA_DMA);
+ val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
} else {
- val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
+ val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
}
/* All other bits unspecified (and 0) */
- offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ offset = reg_n_offset(reg, endpoint->endpoint_id);
iowrite32(val, ipa->reg_virt + offset);
}
-/* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
- * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
- * they're configured to have granularity 100 usec and 1 msec, respectively.
- *
- * The return value is the positive or negative Qtime value to use to
- * express the (microsecond) time provided. A positive return value
- * means pulse generator 0 can be used; otherwise use pulse generator 1.
+/* For IPA v4.5+, times are expressed using Qtime. A time is represented
+ * at one of several available granularities, which are configured in
+ * ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
+ * generators are set up with different "tick" periods. A Qtime value
+ * encodes a tick count along with an indication of a pulse generator
+ * (which has a fixed tick period). Two pulse generators are always
+ * available to the AP; a third is available starting with IPA v5.0.
+ * This function determines which pulse generator most accurately
+ * represents the time period provided, and returns the tick count to
+ * use to represent that time.
*/
-static int ipa_qtime_val(u32 microseconds, u32 max)
-{
- u32 val;
-
- /* Use 100 microsecond granularity if possible */
- val = DIV_ROUND_CLOSEST(microseconds, 100);
- if (val <= max)
- return (int)val;
-
- /* Have to use pulse generator 1 (millisecond granularity) */
- val = DIV_ROUND_CLOSEST(microseconds, 1000);
- WARN_ON(val > max);
+static u32
+ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
+{
+ u32 which = 0;
+ u32 ticks;
+
+ /* Pulse generator 0 has 100 microsecond granularity */
+ ticks = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (ticks <= max)
+ goto out;
+
+ /* Pulse generator 1 has millisecond granularity */
+ which = 1;
+ ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
+ if (ticks <= max)
+ goto out;
+
+ if (ipa->version >= IPA_VERSION_5_0) {
+ /* Pulse generator 2 has 10 millisecond granularity */
+ which = 2;
+ ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
+ }
+ WARN_ON(ticks > max);
+out:
+ *select = which;
- return (int)-val;
+ return ticks;
}
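
The selection policy is easy to exercise on its own. The userspace sketch below (not driver code) mirrors it under the granularities ipa_qtime_config() programs: 100 microseconds and 1 millisecond always, plus 10 milliseconds on IPA v5.0+.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* Try each pulse generator's tick period, coarsest last, until the
 * tick count fits in the register field.
 */
static uint32_t qtime_val(uint32_t microseconds, uint32_t max,
			  int is_v5_0_plus, uint32_t *select)
{
	static const uint32_t gran_usec[] = { 100, 1000, 10000 };
	uint32_t count = is_v5_0_plus ? 3 : 2;
	uint32_t ticks = 0;
	uint32_t which;

	for (which = 0; which < count; which++) {
		ticks = DIV_ROUND_CLOSEST(microseconds, gran_usec[which]);
		if (ticks <= max)
			break;
	}
	*select = which < count ? which : count - 1;

	return ticks;	/* Caller warns if this still exceeds max */
}

int main(void)
{
	uint32_t select;
	uint32_t ticks;

	/* 250 msec with a 5-bit (max 31) limit field on IPA v5.0 */
	ticks = qtime_val(250000, 31, 1, &select);
	printf("250000 usec -> %u ticks on pulse generator %u\n",
	       ticks, select);

	return 0;
}

Here generators 0 and 1 would need 2500 and 250 ticks, which overflow the 5-bit field, so the 10 millisecond generator is selected with 25 ticks.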
/* Encode the aggregation timer limit (microseconds) based on IPA version */
-static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
+static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
+ u32 ticks;
u32 max;
- u32 val;
if (!microseconds)
return 0; /* Nothing to compute if time limit is 0 */
- max = ipa_reg_field_max(reg, TIME_LIMIT);
+ max = reg_field_max(reg, TIME_LIMIT);
if (ipa->version >= IPA_VERSION_4_5) {
- u32 gran_sel;
- int ret;
-
- /* Compute the Qtime limit value to use */
- ret = ipa_qtime_val(microseconds, max);
- if (ret < 0) {
- val = -ret;
- gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
- } else {
- val = ret;
- gran_sel = 0;
- }
+ u32 select;
+
+ ticks = ipa_qtime_val(ipa, microseconds, max, &select);
- return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
+ return reg_encode(reg, AGGR_GRAN_SEL, select) |
+ reg_encode(reg, TIME_LIMIT, ticks);
}
/* We program aggregation granularity in ipa_hardware_config() */
- val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
- WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
+ ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
+ WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
microseconds, max * IPA_AGGR_GRANULARITY);
- return ipa_reg_encode(reg, TIME_LIMIT, val);
+ return reg_encode(reg, TIME_LIMIT, ticks);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
@@ -997,13 +1005,13 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
u32 limit;
rx_config = &endpoint->config.rx;
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
- val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
+ val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
+ val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
buffer_size = rx_config->buffer_size;
limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
- val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
+ val |= reg_encode(reg, BYTE_LIMIT, limit);
limit = rx_config->aggr_time_limit;
val |= aggr_time_limit_encode(ipa, reg, limit);
@@ -1011,20 +1019,20 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
/* AGGR_PKT_LIMIT is 0 (unlimited) */
if (rx_config->aggr_close_eof)
- val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
+ val |= reg_bit(reg, SW_EOF_ACTIVE);
} else {
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
- val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
+ val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
+ val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
+ val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
/* other fields ignored */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count. For
@@ -1035,7 +1043,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
* Return the encoded value representing the timeout period provided
* that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
*/
-static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
+static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
u32 width;
@@ -1049,21 +1057,14 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
return 0; /* Nothing to compute if timer period is 0 */
if (ipa->version >= IPA_VERSION_4_5) {
- u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
- u32 gran_sel;
- int ret;
-
- /* Compute the Qtime limit value to use */
- ret = ipa_qtime_val(microseconds, max);
- if (ret < 0) {
- val = -ret;
- gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
- } else {
- val = ret;
- gran_sel = 0;
- }
+ u32 max = reg_field_max(reg, TIMER_LIMIT);
+ u32 select;
+ u32 ticks;
- return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
+ ticks = ipa_qtime_val(ipa, microseconds, max, &select);
+
+ return reg_encode(reg, TIMER_GRAN_SEL, select) |
+ reg_encode(reg, TIMER_LIMIT, ticks);
}
/* Use 64 bit arithmetic to avoid overflow */
@@ -1071,11 +1072,11 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
/* We still need the result to fit into the field */
- WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
+ WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
/* IPA v3.5.1 through v4.1 just record the tick count */
if (ipa->version < IPA_VERSION_4_2)
- return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
+ return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
/* For IPA v4.2, the tick count is represented by base and
* scale fields within the 32-bit timer register, where:
@@ -1086,7 +1087,7 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
* such that high bit is included.
*/
high = fls(ticks); /* 1..32 (or warning above) */
- width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
+ width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
scale = high > width ? high - width : 0;
if (scale) {
/* If we're scaling, round up to get a closer result */
@@ -1096,8 +1097,8 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
scale++;
}
- val = ipa_reg_encode(reg, TIMER_SCALE, scale);
- val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
+ val = reg_encode(reg, TIMER_SCALE, scale);
+ val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
return val;
}
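
For IPA v4.2 the driver keeps only the high-order bits of the tick count, storing a value that is approximately base << scale. The standalone sketch below walks through that reduction, assuming an illustrative 5-bit base field.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ticks = 1000;	/* Tick count to represent */
	uint32_t width = 5;	/* hweight32() of the base field mask */
	uint32_t high = 64 - __builtin_clzll(ticks);	/* Like fls() */
	uint32_t scale = high > width ? high - width : 0;

	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1ULL << (scale - 1);
		/* Rounding may carry into a new high-order bit */
		if ((uint32_t)(64 - __builtin_clzll(ticks)) != high)
			scale++;
	}

	printf("1000 ticks ~= base %llu << scale %u (= %llu)\n",
	       (unsigned long long)(ticks >> scale), scale,
	       (unsigned long long)((ticks >> scale) << scale));

	return 0;
}

With these inputs the result is base 31, scale 5, representing 992 of the requested 1000 ticks.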
@@ -1108,14 +1109,14 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
val = hol_block_timer_encode(ipa, reg, microseconds);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void
@@ -1123,13 +1124,13 @@ ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
- offset = ipa_reg_n_offset(reg, endpoint_id);
- val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
+ offset = reg_n_offset(reg, endpoint_id);
+ val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;
iowrite32(val, ipa->reg_virt + offset);
@@ -1170,7 +1171,7 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
if (!endpoint->toward_ipa)
@@ -1182,7 +1183,7 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
@@ -1190,20 +1191,20 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
u32 resource_group = endpoint->config.resource_group;
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
- val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
+ val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (!endpoint->toward_ipa)
@@ -1212,14 +1213,14 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
reg = ipa_reg(ipa, ENDP_INIT_SEQ);
/* Low-order byte configures primary packet processing */
- val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
+ val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
/* Second byte (if supported) configures replicated packet processing */
if (ipa->version < IPA_VERSION_4_5)
- val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
- endpoint->config.tx.seq_rep_type);
+ val |= reg_encode(reg, SEQ_REP_TYPE,
+ endpoint->config.tx.seq_rep_type);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/**
@@ -1269,12 +1270,12 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_STATUS);
if (endpoint->config.status_enable) {
- val |= ipa_reg_bit(reg, STATUS_EN);
+ val |= reg_bit(reg, STATUS_EN);
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
@@ -1282,8 +1283,7 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
- val |= ipa_reg_encode(reg, STATUS_ENDP,
- status_endpoint_id);
+ val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
}
/* STATUS_LOCATION is 0, meaning IPA packet status
* precedes the packet (not present for IPA v4.5+)
@@ -1291,7 +1291,7 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
@@ -1635,18 +1635,18 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, ROUTE);
/* ROUTE_DIS is 0 */
- val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
- val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
+ val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
+ val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
/* ROUTE_DEF_HDR_OFST is 0 */
- val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
- val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
+ val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
+ val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
@@ -1984,8 +1984,9 @@ void ipa_endpoint_deconfig(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 endpoint_id;
+ u32 hw_limit;
u32 tx_count;
u32 rx_count;
u32 rx_base;
@@ -2017,12 +2018,12 @@ int ipa_endpoint_config(struct ipa *ipa)
* the highest one doesn't exceed the number supported by software.
*/
reg = ipa_reg(ipa, FLAVOR_0);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ val = ioread32(ipa->reg_virt + reg_offset(reg));
/* Our RX is an IPA producer; our TX is an IPA consumer. */
- tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
- rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
- rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
+ tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
+ rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
+ rx_base = reg_decode(reg, PROD_LOWEST, val);
limit = rx_base + rx_count;
if (limit > IPA_ENDPOINT_MAX) {
@@ -2031,6 +2032,14 @@ int ipa_endpoint_config(struct ipa *ipa)
return -EINVAL;
}
+ /* Until IPA v5.0, hardware supported at most 32 endpoints (IDs 0-31) */
+ hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
+ if (limit > hw_limit) {
+ dev_err(dev, "unexpected endpoint count, %u > %u\n",
+ limit, hw_limit);
+ return -EINVAL;
+ }
+
/* Allocate and initialize the available endpoint bitmap */
ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
if (!ipa->available)
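
FLAVOR_0 packs the consumer pipe count, producer pipe count, and lowest producer endpoint ID into a single word, and the driver checks the derived endpoint limit against both IPA_ENDPOINT_MAX and the hardware ceiling. The userspace sketch below shows the decode step with hypothetical field positions.

#include <stdint.h>
#include <stdio.h>

/* Extract a field from a register value, like reg_decode() */
static uint32_t decode(uint32_t val, uint32_t fmask)
{
	return (val & fmask) >> __builtin_ctz(fmask);
}

int main(void)
{
	/* Hypothetical layout: MAX_CONS_PIPES in bits 7:0,
	 * MAX_PROD_PIPES in bits 15:8, PROD_LOWEST in bits 23:16.
	 */
	uint32_t val = 0x000b0a0c;	/* Example FLAVOR_0 contents */
	uint32_t tx_count = decode(val, 0x000000ff);
	uint32_t rx_count = decode(val, 0x0000ff00);
	uint32_t rx_base = decode(val, 0x00ff0000);

	printf("TX %u, RX %u at base %u -> endpoint limit %u\n",
	       tx_count, rx_count, rx_base, rx_base + rx_count);

	return 0;
}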
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 4a5c3bc549df..3ad2e802040a 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#ifndef _IPA_ENDPOINT_H_
#define _IPA_ENDPOINT_H_
@@ -38,7 +38,7 @@ enum ipa_endpoint_name {
IPA_ENDPOINT_COUNT, /* Number of names (not an index) */
};
-#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
+#define IPA_ENDPOINT_MAX 36 /* Max supported by driver */
/**
* struct ipa_endpoint_tx - Endpoint configuration for TX endpoints
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index c19cd27ac852..4bc05948f772 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include "ipa.h"
#include "ipa_reg.h"
@@ -46,12 +47,12 @@ struct ipa_interrupt {
static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
struct ipa *ipa = interrupt->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 mask = BIT(irq_id);
u32 offset;
reg = ipa_reg(ipa, IPA_IRQ_CLR);
- offset = ipa_reg_offset(reg);
+ offset = reg_offset(reg);
switch (irq_id) {
case IPA_IRQ_UC_0:
@@ -84,7 +85,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
- const struct ipa_reg *reg;
+ const struct reg *reg;
struct device *dev;
u32 pending;
u32 offset;
@@ -101,7 +102,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
* only the enabled ones.
*/
reg = ipa_reg(ipa, IPA_IRQ_STTS);
- offset = ipa_reg_offset(reg);
+ offset = reg_offset(reg);
pending = ioread32(ipa->reg_virt + offset);
while ((mask = pending & enabled)) {
do {
@@ -119,8 +120,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
reg = ipa_reg(ipa, IPA_IRQ_CLR);
- offset = ipa_reg_offset(reg);
- iowrite32(pending, ipa->reg_virt + offset);
+ iowrite32(pending, ipa->reg_virt + reg_offset(reg));
}
out_power_put:
pm_runtime_mark_last_busy(dev);
@@ -131,9 +131,9 @@ out_power_put:
static void ipa_interrupt_enabled_update(struct ipa *ipa)
{
- const struct ipa_reg *reg = ipa_reg(ipa, IPA_IRQ_EN);
+ const struct reg *reg = ipa_reg(ipa, IPA_IRQ_EN);
- iowrite32(ipa->interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(ipa->interrupt->enabled, ipa->reg_virt + reg_offset(reg));
}
/* Enable an IPA interrupt type */
@@ -169,7 +169,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id % 32);
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -180,7 +180,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
return;
reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
- offset = ipa_reg_n_offset(reg, unit);
+ offset = reg_n_offset(reg, unit);
val = ioread32(ipa->reg_virt + offset);
if (enable)
@@ -214,18 +214,18 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
unit_count = roundup(ipa->endpoint_count, 32);
for (unit = 0; unit < unit_count; unit++) {
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
- val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0)
continue;
reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
}
}
@@ -240,7 +240,7 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
- const struct ipa_reg *reg;
+ const struct reg *reg;
unsigned int irq;
int ret;
@@ -260,7 +260,7 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
/* Start with all IPA interrupts disabled */
reg = ipa_reg(ipa, IPA_IRQ_EN);
- iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
@@ -269,9 +269,9 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
goto err_kfree;
}
- ret = enable_irq_wake(irq);
+ ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
- dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
+ dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n", ret);
goto err_free_irq;
}
@@ -289,11 +289,8 @@ err_kfree:
void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
{
struct device *dev = &interrupt->ipa->pdev->dev;
- int ret;
- ret = disable_irq_wake(interrupt->irq);
- if (ret)
- dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
+ dev_pm_clear_wake_irq(dev);
free_irq(interrupt->irq, interrupt);
kfree(interrupt);
}
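
The interrupt changes above drop the manual enable_irq_wake()/disable_irq_wake() calls in favor of dev_pm_set_wake_irq(), which lets the PM core arm and disarm the wake IRQ across suspend transitions. A minimal kernel-style sketch of that pattern is shown below; the example_* names are hypothetical.

#include <linux/interrupt.h>
#include <linux/pm_wakeirq.h>

static int example_wake_irq_config(struct device *dev, unsigned int irq,
				   irq_handler_t thread_fn, void *data)
{
	int ret;

	ret = request_threaded_irq(irq, NULL, thread_fn, IRQF_ONESHOT,
				   "example", data);
	if (ret)
		return ret;

	/* The PM core now manages wake-up arming for this IRQ */
	ret = dev_pm_set_wake_irq(dev, irq);
	if (ret)
		free_irq(irq, data);

	return ret;
}

static void example_wake_irq_deconfig(struct device *dev, unsigned int irq,
				      void *data)
{
	dev_pm_clear_wake_irq(dev);	/* Replaces disable_irq_wake() */
	free_irq(irq, data);
}

Besides being simpler, this removes the asymmetric error handling the old teardown path needed, since dev_pm_clear_wake_irq() cannot fail.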
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 4fb92f771974..6cb7bf96a626 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -203,7 +203,7 @@ static void ipa_teardown(struct ipa *ipa)
static void
ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* IPA v4.5+ has no backward compatibility register */
@@ -212,13 +212,13 @@ ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
reg = ipa_reg(ipa, IPA_BCR);
val = data->backward_compat;
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
static void ipa_hardware_config_tx(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -227,11 +227,11 @@ static void ipa_hardware_config_tx(struct ipa *ipa)
/* Disable PA mask to allow HOLB drop */
reg = ipa_reg(ipa, IPA_TX_CFG);
- offset = ipa_reg_offset(reg);
+ offset = reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
- val &= ~ipa_reg_bit(reg, PA_MASK_EN);
+ val &= ~reg_bit(reg, PA_MASK_EN);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -239,7 +239,7 @@ static void ipa_hardware_config_tx(struct ipa *ipa)
static void ipa_hardware_config_clkon(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (version >= IPA_VERSION_4_5)
@@ -252,20 +252,20 @@ static void ipa_hardware_config_clkon(struct ipa *ipa)
reg = ipa_reg(ipa, CLKON_CFG);
if (version == IPA_VERSION_3_1) {
/* Disable MISC clock gating */
- val = ipa_reg_bit(reg, CLKON_MISC);
+ val = reg_bit(reg, CLKON_MISC);
} else { /* IPA v4.0+ */
/* Enable open global clocks in the CLKON configuration */
- val = ipa_reg_bit(reg, CLKON_GLOBAL);
- val |= ipa_reg_bit(reg, GLOBAL_2X_CLK);
+ val = reg_bit(reg, CLKON_GLOBAL);
+ val |= reg_bit(reg, GLOBAL_2X_CLK);
}
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -274,21 +274,22 @@ static void ipa_hardware_config_comp(struct ipa *ipa)
return;
reg = ipa_reg(ipa, COMP_CFG);
- offset = ipa_reg_offset(reg);
+ offset = reg_offset(reg);
+
val = ioread32(ipa->reg_virt + offset);
if (ipa->version == IPA_VERSION_4_0) {
- val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
- val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
- val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
} else if (ipa->version < IPA_VERSION_4_5) {
- val |= ipa_reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
+ val |= reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
} else {
/* For IPA v4.5 FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
}
- val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
- val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
+ val |= reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
+ val |= reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -299,7 +300,7 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
{
const struct ipa_qsb_data *data0;
const struct ipa_qsb_data *data1;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
@@ -310,29 +311,27 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
/* Max outstanding write accesses for QSB masters */
reg = ipa_reg(ipa, QSB_MAX_WRITES);
- val = ipa_reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
+ val = reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
if (data->qsb_count > 1)
- val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_WRITES,
- data1->max_writes);
+ val |= reg_encode(reg, GEN_QMB_1_MAX_WRITES, data1->max_writes);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Max outstanding read accesses for QSB masters */
reg = ipa_reg(ipa, QSB_MAX_READS);
- val = ipa_reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
+ val = reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= ipa_reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
- data0->max_reads_beats);
+ val |= reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
+ data0->max_reads_beats);
if (data->qsb_count > 1) {
- val = ipa_reg_encode(reg, GEN_QMB_1_MAX_READS,
- data1->max_reads);
+ val = reg_encode(reg, GEN_QMB_1_MAX_READS, data1->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
- data1->max_reads_beats);
+ val |= reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
+ data1->max_reads_beats);
}
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* The internal inactivity timer clock is used for the aggregation timer */
@@ -368,41 +367,47 @@ static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
*/
static void ipa_qtime_config(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
/* Timer clock divider must be disabled when we change the rate */
reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
- iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
- val = ipa_reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
- val |= ipa_reg_bit(reg, DPL_TIMESTAMP_SEL);
+ val = reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
+ val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
/* Configure tag and NAT Qtime timestamp resolution as well */
- val = ipa_reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
- val = ipa_reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
+ val |= reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
+ val |= reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Set granularity of pulse generators used for other timers */
reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
- val = ipa_reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
- val |= ipa_reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
- val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+ val = reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
+ val |= reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
+ if (ipa->version >= IPA_VERSION_5_0) {
+ val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_10_MS);
+ val |= reg_encode(reg, PULSE_GRAN_3, IPA_GRAN_10_MS);
+ } else {
+ val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+ }
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Actual divider is 1 more than value supplied here */
reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
- offset = ipa_reg_offset(reg);
- val = ipa_reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
+ offset = reg_offset(reg);
+
+ val = reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
iowrite32(val, ipa->reg_virt + offset);
/* Divider value is set; re-enable the common timer clock divider */
- val |= ipa_reg_bit(reg, DIV_ENABLE);
+ val |= reg_bit(reg, DIV_ENABLE);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -411,13 +416,13 @@ static void ipa_qtime_config(struct ipa *ipa)
static void ipa_hardware_config_counter(struct ipa *ipa)
{
u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, COUNTER_CFG);
/* If defined, EOT_COAL_GRANULARITY is 0 */
- val = ipa_reg_encode(reg, AGGR_GRANULARITY, granularity);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ val = reg_encode(reg, AGGR_GRANULARITY, granularity);
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
static void ipa_hardware_config_timing(struct ipa *ipa)
@@ -430,8 +435,13 @@ static void ipa_hardware_config_timing(struct ipa *ipa)
static void ipa_hardware_config_hashing(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
+ /* Other than IPA v4.2, all versions enable "hashing". Starting
+ * with IPA v5.0, the filter and router tables are implemented
+ * differently, but the default configuration enables this feature
+ * (now referred to as "caching"), so there's nothing to do here.
+ */
if (ipa->version != IPA_VERSION_4_2)
return;
@@ -441,26 +451,26 @@ static void ipa_hardware_config_hashing(struct ipa *ipa)
/* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
* IPV4_FILTER_HASH are all zero.
*/
- iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
}
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (ipa->version < IPA_VERSION_3_5_1)
return;
reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
- val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
- enter_idle_debounce_thresh);
+ val = reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
+ enter_idle_debounce_thresh);
if (const_non_idle_enable)
- val |= ipa_reg_bit(reg, CONST_NON_IDLE_ENABLE);
+ val |= reg_bit(reg, CONST_NON_IDLE_ENABLE);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/**
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 9ec5af323f73..85096d1efe5b 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -75,7 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
- const struct ipa_reg *reg;
+ const struct reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
@@ -115,8 +115,8 @@ int ipa_mem_setup(struct ipa *ipa)
offset = ipa->mem_offset + mem->offset;
reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
- val = ipa_reg_encode(reg, IPA_BASE_ADDR, offset);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ val = reg_encode(reg, IPA_BASE_ADDR, offset);
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
return 0;
}
@@ -163,6 +163,12 @@ static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
return false;
break;
+ case IPA_MEM_AP_V4_FILTER:
+ case IPA_MEM_AP_V6_FILTER:
+ if (version != IPA_VERSION_5_0)
+ return false;
+ break;
+
case IPA_MEM_NAT_TABLE:
case IPA_MEM_STATS_FILTER_ROUTE:
if (version < IPA_VERSION_4_5)
@@ -312,8 +318,8 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- const struct ipa_reg *reg;
const struct ipa_mem *mem;
+ const struct reg *reg;
dma_addr_t addr;
u32 mem_size;
void *virt;
@@ -322,13 +328,13 @@ int ipa_mem_config(struct ipa *ipa)
/* Check the advertised location and size of the shared memory area */
reg = ipa_reg(ipa, SHARED_MEM_SIZE);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ val = ioread32(ipa->reg_virt + reg_offset(reg));
/* The fields in the register are in 8 byte units */
- ipa->mem_offset = 8 * ipa_reg_decode(reg, MEM_BADDR, val);
+ ipa->mem_offset = 8 * reg_decode(reg, MEM_BADDR, val);
/* Make sure the end is within the region's mapped space */
- mem_size = 8 * ipa_reg_decode(reg, MEM_SIZE, val);
+ mem_size = 8 * reg_decode(reg, MEM_SIZE, val);
/* If the sizes don't match, issue a warning */
if (ipa->mem_offset + mem_size < ipa->mem_size) {
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 570bfdd99bff..868e9c20e8c4 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
@@ -62,13 +62,15 @@ enum ipa_mem_id {
IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0+) */
IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0+) */
IPA_MEM_STATS_QUOTA_AP, /* 0 canaries, optional (IPA v4.0+) */
- IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_TETHERING, /* 0 canaries, optional (IPA v4.0+) */
IPA_MEM_STATS_DROP, /* 0 canaries, optional (IPA v4.0+) */
- /* The next 5 filter and route statistics regions are optional */
+ /* The next 7 filter and route statistics regions are optional */
IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_AP_V4_FILTER, /* 2 canaries (IPA v5.0) */
+ IPA_MEM_AP_V6_FILTER, /* 0 canaries (IPA v5.0) */
IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5+) */
IPA_MEM_NAT_TABLE, /* 4 canaries, optional (IPA v4.5+) */
IPA_MEM_END_MARKER, /* 1 canary (not a real region) */
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index ddd529153e15..735fa6591609 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -9,73 +9,96 @@
#include "ipa.h"
#include "ipa_reg.h"
-/* Is this register valid and defined for the current IPA version? */
-static bool ipa_reg_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
+/* Is this register ID valid for the current IPA version? */
+static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
{
enum ipa_version version = ipa->version;
- bool valid;
-
- /* Check for bogus (out of range) register IDs */
- if ((u32)reg_id >= ipa->regs->reg_count)
- return false;
switch (reg_id) {
case IPA_BCR:
case COUNTER_CFG:
- valid = version < IPA_VERSION_4_5;
- break;
+ return version < IPA_VERSION_4_5;
case IPA_TX_CFG:
case FLAVOR_0:
case IDLE_INDICATION_CFG:
- valid = version >= IPA_VERSION_3_5;
- break;
+ return version >= IPA_VERSION_3_5;
case QTIME_TIMESTAMP_CFG:
case TIMERS_XO_CLK_DIV_CFG:
case TIMERS_PULSE_GRAN_CFG:
- valid = version >= IPA_VERSION_4_5;
- break;
+ return version >= IPA_VERSION_4_5;
case SRC_RSRC_GRP_45_RSRC_TYPE:
case DST_RSRC_GRP_45_RSRC_TYPE:
- valid = version <= IPA_VERSION_3_1 ||
- version == IPA_VERSION_4_5;
- break;
+ return version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_4_5;
case SRC_RSRC_GRP_67_RSRC_TYPE:
case DST_RSRC_GRP_67_RSRC_TYPE:
- valid = version <= IPA_VERSION_3_1;
- break;
+ return version <= IPA_VERSION_3_1;
case ENDP_FILTER_ROUTER_HSH_CFG:
- valid = version != IPA_VERSION_4_2;
- break;
+ return version != IPA_VERSION_4_2;
case IRQ_SUSPEND_EN:
case IRQ_SUSPEND_CLR:
- valid = version >= IPA_VERSION_3_1;
- break;
+ return version >= IPA_VERSION_3_1;
+
+ case COMP_CFG:
+ case CLKON_CFG:
+ case ROUTE:
+ case SHARED_MEM_SIZE:
+ case QSB_MAX_WRITES:
+ case QSB_MAX_READS:
+ case FILT_ROUT_HASH_EN:
+ case FILT_ROUT_CACHE_CFG:
+ case FILT_ROUT_HASH_FLUSH:
+ case FILT_ROUT_CACHE_FLUSH:
+ case STATE_AGGR_ACTIVE:
+ case LOCAL_PKT_PROC_CNTXT:
+ case AGGR_FORCE_CLOSE:
+ case SRC_RSRC_GRP_01_RSRC_TYPE:
+ case SRC_RSRC_GRP_23_RSRC_TYPE:
+ case DST_RSRC_GRP_01_RSRC_TYPE:
+ case DST_RSRC_GRP_23_RSRC_TYPE:
+ case ENDP_INIT_CTRL:
+ case ENDP_INIT_CFG:
+ case ENDP_INIT_NAT:
+ case ENDP_INIT_HDR:
+ case ENDP_INIT_HDR_EXT:
+ case ENDP_INIT_HDR_METADATA_MASK:
+ case ENDP_INIT_MODE:
+ case ENDP_INIT_AGGR:
+ case ENDP_INIT_HOL_BLOCK_EN:
+ case ENDP_INIT_HOL_BLOCK_TIMER:
+ case ENDP_INIT_DEAGGR:
+ case ENDP_INIT_RSRC_GRP:
+ case ENDP_INIT_SEQ:
+ case ENDP_STATUS:
+ case ENDP_FILTER_CACHE_CFG:
+ case ENDP_ROUTER_CACHE_CFG:
+ case IPA_IRQ_STTS:
+ case IPA_IRQ_EN:
+ case IPA_IRQ_CLR:
+ case IPA_IRQ_UC:
+ case IRQ_SUSPEND_INFO:
+ return true; /* These should be defined for all versions */
default:
- valid = true; /* Others should be defined for all versions */
- break;
+ return false;
}
-
- /* To be valid, it must be defined */
-
- return valid && ipa->regs->reg[reg_id];
}
-const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
+const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
{
- if (WARN_ON(!ipa_reg_valid(ipa, reg_id)))
+ if (WARN(!ipa_reg_id_valid(ipa, reg_id), "invalid reg %u\n", reg_id))
return NULL;
- return ipa->regs->reg[reg_id];
+ return reg(ipa->regs, reg_id);
}
-static const struct ipa_regs *ipa_regs(enum ipa_version version)
+static const struct regs *ipa_regs(enum ipa_version version)
{
switch (version) {
case IPA_VERSION_3_1:
@@ -100,7 +123,7 @@ static const struct ipa_regs *ipa_regs(enum ipa_version version)
int ipa_reg_init(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- const struct ipa_regs *regs;
+ const struct regs *regs;
struct resource *res;
regs = ipa_regs(ipa->version);
@@ -123,7 +146,6 @@ int ipa_reg_init(struct ipa *ipa)
dev_err(dev, "unable to remap \"ipa-reg\" memory\n");
return -ENOMEM;
}
- ipa->reg_addr = res->start;
ipa->regs = regs;
return 0;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index b1a3c2c7e167..28aa1351dd48 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
@@ -10,6 +10,7 @@
#include <linux/bug.h>
#include "ipa_version.h"
+#include "reg.h"
struct ipa;
@@ -35,7 +36,7 @@ struct ipa;
* by register ID. Each entry in the array specifies the base offset and
* (for parameterized registers) a non-zero stride value. Not all versions
* of IPA define all registers. The offset for a register is returned by
- * ipa_reg_offset() when the register's ipa_reg structure is supplied;
+ * reg_offset() when the register's reg structure is supplied;
* zero is returned for an undefined register (this should never happen).
*
* Some registers encode multiple fields within them. Each field in
@@ -44,9 +45,9 @@ struct ipa;
* an array of field masks, indexed by field ID. Two functions are
* used to access register fields; both take an ipa_reg structure as
* argument. To encode a value to be represented in a register field,
- * the value and field ID are passed to ipa_reg_encode(). To extract
+ * the value and field ID are passed to reg_encode(). To extract
* a value encoded in a register field, the field ID is passed to
- * ipa_reg_decode(). In addition, for single-bit fields, ipa_reg_bit()
+ * reg_decode(). In addition, for single-bit fields, reg_bit()
* can be used to either encode the bit value, or to generate a mask
* used to extract the bit value.
*/
@@ -59,8 +60,10 @@ enum ipa_reg_id {
SHARED_MEM_SIZE,
QSB_MAX_WRITES,
QSB_MAX_READS,
- FILT_ROUT_HASH_EN,
- FILT_ROUT_HASH_FLUSH,
+ FILT_ROUT_HASH_EN, /* Not IPA v5.0+ */
+ FILT_ROUT_CACHE_CFG, /* IPA v5.0+ */
+ FILT_ROUT_HASH_FLUSH, /* Not IPA v5.0+ */
+ FILT_ROUT_CACHE_FLUSH, /* IPA v5.0+ */
STATE_AGGR_ACTIVE,
IPA_BCR, /* Not IPA v4.5+ */
LOCAL_PKT_PROC_CNTXT,
@@ -95,7 +98,9 @@ enum ipa_reg_id {
ENDP_INIT_SEQ, /* TX only */
ENDP_STATUS,
ENDP_FILTER_ROUTER_HSH_CFG, /* Not IPA v4.2 */
- /* The IRQ registers are only used for GSI_EE_AP */
+ ENDP_FILTER_CACHE_CFG, /* IPA v5.0+ */
+ ENDP_ROUTER_CACHE_CFG, /* IPA v5.0+ */
+ /* The IRQ registers that follow are only used for GSI_EE_AP */
IPA_IRQ_STTS,
IPA_IRQ_EN,
IPA_IRQ_CLR,
@@ -106,56 +111,6 @@ enum ipa_reg_id {
IPA_REG_ID_COUNT, /* Last; not an ID */
};
-/**
- * struct ipa_reg - An IPA register descriptor
- * @offset: Register offset relative to base of the "ipa-reg" memory
- * @stride: Distance between two instances, if parameterized
- * @fcount: Number of entries in the @fmask array
- * @fmask: Array of mask values defining position and width of fields
- * @name: Upper-case name of the IPA register
- */
-struct ipa_reg {
- u32 offset;
- u32 stride;
- u32 fcount;
- const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
- const char *name;
-};
-
-/* Helper macro for defining "simple" (non-parameterized) registers */
-#define IPA_REG(__NAME, __reg_id, __offset) \
- IPA_REG_STRIDE(__NAME, __reg_id, __offset, 0)
-
-/* Helper macro for defining parameterized registers, specifying stride */
-#define IPA_REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
- static const struct ipa_reg ipa_reg_ ## __reg_id = { \
- .name = #__NAME, \
- .offset = __offset, \
- .stride = __stride, \
- }
-
-#define IPA_REG_FIELDS(__NAME, __name, __offset) \
- IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
-
-#define IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
- static const struct ipa_reg ipa_reg_ ## __name = { \
- .name = #__NAME, \
- .offset = __offset, \
- .stride = __stride, \
- .fcount = ARRAY_SIZE(ipa_reg_ ## __name ## _fmask), \
- .fmask = ipa_reg_ ## __name ## _fmask, \
- }
-
-/**
- * struct ipa_regs - Description of registers supported by hardware
- * @reg_count: Number of registers in the @reg[] array
- * @reg: Array of register descriptors
- */
-struct ipa_regs {
- u32 reg_count;
- const struct ipa_reg **reg;
-};
-
/* COMP_CFG register */
enum ipa_reg_comp_cfg_field_id {
COMP_CFG_ENABLE, /* Not IPA v4.0+ */
@@ -251,14 +206,28 @@ enum ipa_reg_qsb_max_reads_field_id {
GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
};
+/* FILT_ROUT_CACHE_CFG register */
+enum ipa_reg_filt_rout_cache_cfg_field_id {
+ ROUTER_CACHE_EN,
+ FILTER_CACHE_EN,
+ LOW_PRI_HASH_HIT_DISABLE,
+ LRU_EVICTION_THRESHOLD,
+};
+
/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
-enum ipa_reg_rout_hash_field_id {
+enum ipa_reg_filt_rout_hash_field_id {
IPV6_ROUTER_HASH,
IPV6_FILTER_HASH,
IPV4_ROUTER_HASH,
IPV4_FILTER_HASH,
};
+/* FILT_ROUT_CACHE_FLUSH register */
+enum ipa_reg_filt_rout_cache_field_id {
+ ROUTER_CACHE,
+ FILTER_CACHE,
+};
+
/* BCR register */
enum ipa_bcr_compat {
BCR_CMDQ_L_LACK_ONE_ENTRY = 0x0, /* Not IPA v4.2+ */
@@ -298,6 +267,7 @@ enum ipa_reg_ipa_tx_cfg_field_id {
DUAL_TX_ENABLE, /* v4.5+ */
SSPND_PA_NO_START_STATE, /* v4,2+, not v4.5 */
SSPND_PA_NO_BQ_STATE, /* v4.2 only */
+ HOLB_STICKY_DROP_EN, /* v5.0+ */
};
/* FLAVOR_0 register */
@@ -333,6 +303,7 @@ enum ipa_reg_timers_pulse_gran_cfg_field_id {
PULSE_GRAN_0,
PULSE_GRAN_1,
PULSE_GRAN_2,
+ PULSE_GRAN_3,
};
/* Values for IPA_GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
@@ -415,6 +386,8 @@ enum ipa_reg_endp_init_hdr_ext_field_id {
HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* v4.5+ */
HDR_OFST_PKT_SIZE_MSB, /* v4.5+ */
HDR_ADDITIONAL_CONST_LEN_MSB, /* v4.5+ */
+ HDR_BYTES_TO_REMOVE_VALID, /* v5.0+ */
+ HDR_BYTES_TO_REMOVE, /* v5.0+ */
};
/* ENDP_INIT_MODE register */
@@ -573,6 +546,17 @@ enum ipa_reg_endp_filter_router_hsh_cfg_field_id {
ROUTER_HASH_MSK_ALL, /* Bitwise OR of the above 6 fields */
};
+/* ENDP_FILTER_CACHE_CFG and ENDP_ROUTER_CACHE_CFG registers */
+enum ipa_reg_endp_cache_cfg_field_id {
+ CACHE_MSK_SRC_ID,
+ CACHE_MSK_SRC_IP,
+ CACHE_MSK_DST_IP,
+ CACHE_MSK_SRC_PORT,
+ CACHE_MSK_DST_PORT,
+ CACHE_MSK_PROTOCOL,
+ CACHE_MSK_METADATA,
+};
+
/* IPA_IRQ_STTS, IPA_IRQ_EN, and IPA_IRQ_CLR registers */
/**
* enum ipa_irq_id - Bit positions representing type of IPA IRQ
@@ -654,79 +638,15 @@ enum ipa_reg_ipa_irq_uc_field_id {
UC_INTR,
};
-extern const struct ipa_regs ipa_regs_v3_1;
-extern const struct ipa_regs ipa_regs_v3_5_1;
-extern const struct ipa_regs ipa_regs_v4_2;
-extern const struct ipa_regs ipa_regs_v4_5;
-extern const struct ipa_regs ipa_regs_v4_7;
-extern const struct ipa_regs ipa_regs_v4_9;
-extern const struct ipa_regs ipa_regs_v4_11;
-
-/* Return the field mask for a field in a register */
-static inline u32 ipa_reg_fmask(const struct ipa_reg *reg, u32 field_id)
-{
- if (!reg || WARN_ON(field_id >= reg->fcount))
- return 0;
-
- return reg->fmask[field_id];
-}
-
-/* Return the mask for a single-bit field in a register */
-static inline u32 ipa_reg_bit(const struct ipa_reg *reg, u32 field_id)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- WARN_ON(!is_power_of_2(fmask));
-
- return fmask;
-}
-
-/* Encode a value into the given field of a register */
-static inline u32
-ipa_reg_encode(const struct ipa_reg *reg, u32 field_id, u32 val)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- if (!fmask)
- return 0;
-
- val <<= __ffs(fmask);
- if (WARN_ON(val & ~fmask))
- return 0;
-
- return val;
-}
-
-/* Given a register value, decode (extract) the value in the given field */
-static inline u32
-ipa_reg_decode(const struct ipa_reg *reg, u32 field_id, u32 val)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- return fmask ? (val & fmask) >> __ffs(fmask) : 0;
-}
-
-/* Return the maximum value representable by the given field; always 2^n - 1 */
-static inline u32 ipa_reg_field_max(const struct ipa_reg *reg, u32 field_id)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- return fmask ? fmask >> __ffs(fmask) : 0;
-}
-
-const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
-
-/* Returns 0 for NULL reg; warning will have already been issued */
-static inline u32 ipa_reg_offset(const struct ipa_reg *reg)
-{
- return reg ? reg->offset : 0;
-}
+extern const struct regs ipa_regs_v3_1;
+extern const struct regs ipa_regs_v3_5_1;
+extern const struct regs ipa_regs_v4_2;
+extern const struct regs ipa_regs_v4_5;
+extern const struct regs ipa_regs_v4_7;
+extern const struct regs ipa_regs_v4_9;
+extern const struct regs ipa_regs_v4_11;
-/* Returns 0 for NULL reg; warning will have already been issued */
-static inline u32 ipa_reg_n_offset(const struct ipa_reg *reg, u32 n)
-{
- return reg ? reg->offset + n * reg->stride : 0;
-}
+const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
int ipa_reg_init(struct ipa *ipa);
void ipa_reg_exit(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index a257f0e5e361..82c88a744d10 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -70,20 +70,20 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
static void
ipa_resource_config_common(struct ipa *ipa, u32 resource_type,
- const struct ipa_reg *reg,
+ const struct reg *reg,
const struct ipa_resource_limits *xlimits,
const struct ipa_resource_limits *ylimits)
{
u32 val;
- val = ipa_reg_encode(reg, X_MIN_LIM, xlimits->min);
- val |= ipa_reg_encode(reg, X_MAX_LIM, xlimits->max);
+ val = reg_encode(reg, X_MIN_LIM, xlimits->min);
+ val |= reg_encode(reg, X_MAX_LIM, xlimits->max);
if (ylimits) {
- val |= ipa_reg_encode(reg, Y_MIN_LIM, ylimits->min);
- val |= ipa_reg_encode(reg, Y_MAX_LIM, ylimits->max);
+ val |= reg_encode(reg, Y_MIN_LIM, ylimits->min);
+ val |= reg_encode(reg, Y_MAX_LIM, ylimits->max);
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, resource_type));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, resource_type));
}
static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
@@ -92,7 +92,7 @@ static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_src_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- const struct ipa_reg *reg;
+ const struct reg *reg;
resource = &data->resource_src[resource_type];
@@ -129,7 +129,7 @@ static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_dst_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- const struct ipa_reg *reg;
+ const struct reg *reg;
resource = &data->resource_dst[resource_type];
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index b81e27b61354..f0529c31d0b6 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -345,9 +345,8 @@ void ipa_table_reset(struct ipa *ipa, bool modem)
int ipa_table_hash_flush(struct ipa *ipa)
{
- const struct ipa_reg *reg;
struct gsi_trans *trans;
- u32 offset;
+ const struct reg *reg;
u32 val;
if (!ipa_table_hash_support(ipa))
@@ -359,15 +358,22 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
- reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
- offset = ipa_reg_offset(reg);
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
- val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
- val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
- val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
- val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);
+ val = reg_bit(reg, IPV6_ROUTER_HASH);
+ val |= reg_bit(reg, IPV6_FILTER_HASH);
+ val |= reg_bit(reg, IPV4_ROUTER_HASH);
+ val |= reg_bit(reg, IPV4_FILTER_HASH);
+ } else {
+ reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
- ipa_cmd_register_write_add(trans, offset, val, val, false);
+ /* IPA v5.0+ uses a unified cache (both IPv4 and IPv6) */
+ val = reg_bit(reg, ROUTER_CACHE);
+ val |= reg_bit(reg, FILTER_CACHE);
+ }
+
+ ipa_cmd_register_write_add(trans, reg_offset(reg), val, val, false);
gsi_trans_commit_wait(trans);
@@ -486,17 +492,26 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
- reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+
+ offset = reg_n_offset(reg, endpoint_id);
+ val = ioread32(endpoint->ipa->reg_virt + offset);
- offset = ipa_reg_n_offset(reg, endpoint_id);
- val = ioread32(endpoint->ipa->reg_virt + offset);
+ /* Zero all filter-related fields, preserving the rest */
+ val &= ~reg_fmask(reg, FILTER_HASH_MSK_ALL);
+ } else {
+ /* IPA v5.0 separates filter and router cache configuration */
+ reg = ipa_reg(ipa, ENDP_FILTER_CACHE_CFG);
+ offset = reg_n_offset(reg, endpoint_id);
- /* Zero all filter-related fields, preserving the rest */
- val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);
+ /* Zero all filter-related fields */
+ val = 0;
+ }
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -536,17 +551,26 @@ static bool ipa_route_id_modem(struct ipa *ipa, u32 route_id)
*/
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
- reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
- offset = ipa_reg_n_offset(reg, route_id);
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = reg_n_offset(reg, route_id);
- val = ioread32(ipa->reg_virt + offset);
+ val = ioread32(ipa->reg_virt + offset);
- /* Zero all route-related fields, preserving the rest */
- val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);
+ /* Zero all route-related fields, preserving the rest */
+ val &= ~reg_fmask(reg, ROUTER_HASH_MSK_ALL);
+ } else {
+ /* IPA v5.0 separates filter and router cache configuration */
+ reg = ipa_reg(ipa, ENDP_ROUTER_CACHE_CFG);
+ offset = reg_n_offset(reg, route_id);
+
+ /* Zero all route-related fields */
+ val = 0;
+ }
iowrite32(val, ipa->reg_virt + offset);
}
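
Both hunks above follow the same shape: select the legacy hashed-table register or its IPA v5.0+ cache counterpart, then program it through the shared reg_*() accessors. A condensed sketch of that pattern, using only the register and field IDs shown in the diff (the helper name ipa_hash_flush_val() is hypothetical, not part of the patch):

static u32 ipa_hash_flush_val(struct ipa *ipa, const struct reg **regp)
{
	const struct reg *reg;
	u32 val;

	if (ipa->version < IPA_VERSION_5_0) {
		/* Pre-v5.0: one flush bit per protocol and table type */
		reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
		val = reg_bit(reg, IPV6_ROUTER_HASH) |
		      reg_bit(reg, IPV6_FILTER_HASH) |
		      reg_bit(reg, IPV4_ROUTER_HASH) |
		      reg_bit(reg, IPV4_FILTER_HASH);
	} else {
		/* v5.0+: a unified cache covers both IPv4 and IPv6 */
		reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
		val = reg_bit(reg, ROUTER_CACHE) | reg_bit(reg, FILTER_CACHE);
	}

	*regp = reg;
	return val;
}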
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index cb8a76a75f21..7eaa0b4ebed9 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -231,7 +231,7 @@ void ipa_uc_power(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* Fill in the command data */
@@ -243,9 +243,9 @@ static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
/* Use an interrupt to tell the microcontroller the command is ready */
reg = ipa_reg(ipa, IPA_IRQ_UC);
- val = ipa_reg_bit(reg, UC_INTR);
+ val = reg_bit(reg, UC_INTR);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index d15821467743..06e75b8ece7e 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -9,7 +9,7 @@
/**
* enum ipa_version
* @IPA_VERSION_3_0: IPA version 3.0/GSI version 1.0
- * @IPA_VERSION_3_1: IPA version 3.1/GSI version 1.1
+ * @IPA_VERSION_3_1: IPA version 3.1/GSI version 1.0
* @IPA_VERSION_3_5: IPA version 3.5/GSI version 1.2
* @IPA_VERSION_3_5_1: IPA version 3.5.1/GSI version 1.3
* @IPA_VERSION_4_0: IPA version 4.0/GSI version 2.0
@@ -20,6 +20,8 @@
* @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
* @IPA_VERSION_5_0: IPA version 5.0/GSI version 3.0
+ * @IPA_VERSION_5_1: IPA version 5.1/GSI version 3.0
+ * @IPA_VERSION_5_5: IPA version 5.5/GSI version 5.5
* @IPA_VERSION_COUNT: Number of defined IPA versions
*
* Defines the version of IPA (and GSI) hardware present on the platform.
@@ -38,6 +40,8 @@ enum ipa_version {
IPA_VERSION_4_9,
IPA_VERSION_4_11,
IPA_VERSION_5_0,
+ IPA_VERSION_5_1,
+ IPA_VERSION_5_5,
IPA_VERSION_COUNT, /* Last; not a version */
};
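
Version gates such as the "ipa->version < IPA_VERSION_5_0" checks in ipa_table.c compare these enumerators directly, so new versions must be added in ascending hardware order, as done above. A minimal sketch of such a gate (the helper below is illustrative, not part of the patch):

static inline bool ipa_has_unified_cache(enum ipa_version version)
{
	/* The unified filter/router cache replaced hashed tables in v5.0 */
	return version >= IPA_VERSION_5_0;
}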
diff --git a/drivers/net/ipa/reg.h b/drivers/net/ipa/reg.h
new file mode 100644
index 000000000000..57b457f39b6e
--- /dev/null
+++ b/drivers/net/ipa/reg.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2022-2023 Linaro Ltd. */
+
+#ifndef _REG_H_
+#define _REG_H_
+
+#include <linux/types.h>
+#include <linux/log2.h>
+#include <linux/bits.h>
+
+/**
+ * struct reg - A register descriptor
+ * @offset: Register offset relative to base of register memory
+ * @stride: Distance between two instances, if parameterized
+ * @fcount: Number of entries in the @fmask array
+ * @fmask: Array of mask values defining position and width of fields
+ * @name: Upper-case name of the register
+ */
+struct reg {
+ u32 offset;
+ u32 stride;
+ u32 fcount;
+ const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
+ const char *name;
+};
+
+/* Helper macro for defining "simple" (non-parameterized) registers */
+#define REG(__NAME, __reg_id, __offset) \
+ REG_STRIDE(__NAME, __reg_id, __offset, 0)
+
+/* Helper macro for defining parameterized registers, specifying stride */
+#define REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
+ static const struct reg reg_ ## __reg_id = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ }
+
+#define REG_FIELDS(__NAME, __name, __offset) \
+ REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
+
+#define REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
+ static const struct reg reg_ ## __name = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ .fcount = ARRAY_SIZE(reg_ ## __name ## _fmask), \
+ .fmask = reg_ ## __name ## _fmask, \
+ }
+
+/**
+ * struct regs - Description of registers supported by hardware
+ * @reg_count: Number of registers in the @reg[] array
+ * @reg: Array of register descriptors
+ */
+struct regs {
+ u32 reg_count;
+ const struct reg **reg;
+};
+
+static inline const struct reg *reg(const struct regs *regs, u32 reg_id)
+{
+ if (WARN(reg_id >= regs->reg_count,
+ "reg out of range (%u > %u)\n", reg_id, regs->reg_count - 1))
+ return NULL;
+
+ return regs->reg[reg_id];
+}
+
+/* Return the field mask for a field in a register, or 0 on error */
+static inline u32 reg_fmask(const struct reg *reg, u32 field_id)
+{
+ if (!reg || WARN_ON(field_id >= reg->fcount))
+ return 0;
+
+ return reg->fmask[field_id];
+}
+
+/* Return the mask for a single-bit field in a register, or 0 on error */
+static inline u32 reg_bit(const struct reg *reg, u32 field_id)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ if (WARN_ON(!is_power_of_2(fmask)))
+ return 0;
+
+ return fmask;
+}
+
+/* Return the maximum value representable by the given field; always 2^n - 1 */
+static inline u32 reg_field_max(const struct reg *reg, u32 field_id)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ return fmask ? fmask >> __ffs(fmask) : 0;
+}
+
+/* Encode a value into the given field of a register */
+static inline u32 reg_encode(const struct reg *reg, u32 field_id, u32 val)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ if (!fmask)
+ return 0;
+
+ val <<= __ffs(fmask);
+ if (WARN_ON(val & ~fmask))
+ return 0;
+
+ return val;
+}
+
+/* Given a register value, decode (extract) the value in the given field */
+static inline u32 reg_decode(const struct reg *reg, u32 field_id, u32 val)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ return fmask ? (val & fmask) >> __ffs(fmask) : 0;
+}
+
+/* Returns 0 for NULL reg; warning should have already been issued */
+static inline u32 reg_offset(const struct reg *reg)
+{
+ return reg ? reg->offset : 0;
+}
+
+/* Returns 0 for NULL reg; warning should have already been issued */
+static inline u32 reg_n_offset(const struct reg *reg, u32 n)
+{
+ return reg ? reg->offset + n * reg->stride : 0;
+}
+
+#endif /* _REG_H_ */
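
A brief usage sketch showing how the macros and accessors above fit together; the register name EXAMPLE, its field IDs, and example_build() are invented for illustration. REG_FIELDS() pairs a descriptor with its reg_<name>_fmask[] array, and reg_bit()/reg_encode() shift values into position using the field's mask:

enum example_field_id {
	EXAMPLE_ENABLE,
	EXAMPLE_COUNT,
};

static const u32 reg_example_fmask[] = {
	[EXAMPLE_ENABLE]	= BIT(0),
	[EXAMPLE_COUNT]		= GENMASK(7, 4),
	/* Bits 8-31 reserved */
};

REG_FIELDS(EXAMPLE, example, 0x00000100);

static u32 example_build(void)
{
	const struct reg *reg = &reg_example;

	/* reg_encode() yields 5 << 4 == 0x50 here, and
	 * reg_decode(reg, EXAMPLE_COUNT, 0x51) would recover 5
	 */
	return reg_bit(reg, EXAMPLE_ENABLE) | reg_encode(reg, EXAMPLE_COUNT, 5);
}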
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
index 677ece3bce9e..648dbfe1fce3 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -16,9 +16,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -39,9 +39,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -52,31 +52,31 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -87,9 +87,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -100,121 +100,121 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
-IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_counter_cfg_fmask[] = {
+static const u32 reg_counter_cfg_fmask[] = {
[EOT_COAL_GRANULARITY] = GENMASK(3, 0),
[AGGR_GRANULARITY] = GENMASK(8, 4),
/* Bits 9-31 reserved */
};
-IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
- 0x00000408, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
- 0x0000040c, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
+ 0x0000040c, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
- 0x00000508, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
- 0x0000050c, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
+ 0x0000050c, 0x0020);
-static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+static const u32 reg_endp_init_ctrl_fmask[] = {
[ENDP_SUSPEND] = BIT(0),
[ENDP_DELAY] = BIT(1),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -223,16 +223,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -245,9 +245,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
/* Bits 29-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -257,12 +257,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 14-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -274,9 +274,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
@@ -289,25 +289,25 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
/* Entire register is a tick count */
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(31, 0),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -317,25 +317,24 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(2, 0),
/* Bits 3-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
@@ -343,9 +342,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -366,84 +365,84 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [IPA_BCR] = &ipa_reg_ipa_bcr,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [COUNTER_CFG] = &ipa_reg_counter_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
- [SRC_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_67_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
- [DST_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_67_rsrc_type,
- [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v3_1 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
+ [SRC_RSRC_GRP_67_RSRC_TYPE] = &reg_src_rsrc_grp_67_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_67_RSRC_TYPE] = &reg_dst_rsrc_grp_67_rsrc_type,
+ [ENDP_INIT_CTRL] = &reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
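
The ipa_regs_v3_1 table above is what the generic reg() lookup in reg.h indexes, so version-specific register data is reached through one code path. A minimal sketch, with a hypothetical wrapper, of how an offset is resolved against it:

static u32 v3_1_comp_cfg_offset(void)
{
	/* reg() bounds-checks the ID; reg_offset() tolerates a NULL result */
	return reg_offset(reg(&ipa_regs_v3_1, COMP_CFG));	/* 0x0000003c */
}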
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
index b9c6a50de243..78b1bf60cd02 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -16,9 +16,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -44,9 +44,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -57,31 +57,31 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -92,9 +92,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -105,42 +105,42 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
-IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_counter_cfg_fmask[] = {
+static const u32 reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
[AGGR_GRANULARITY] = GENMASK(8, 4),
/* Bits 9-31 reserved */
};
-IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
[TX0_PREFETCH_DISABLE] = BIT(0),
[TX1_PREFETCH_DISABLE] = BIT(1),
[PREFETCH_ALMOST_EMPTY_SIZE] = GENMASK(4, 2),
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -151,17 +151,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -172,10 +172,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -186,10 +186,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -200,10 +200,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -214,18 +214,18 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+static const u32 reg_endp_init_ctrl_fmask[] = {
[ENDP_SUSPEND] = BIT(0),
[ENDP_DELAY] = BIT(1),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -234,16 +234,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -256,9 +256,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
/* Bits 29-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -268,12 +268,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 14-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -285,9 +285,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
@@ -300,25 +300,25 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
/* Entire register is a tick count */
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(31, 0),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -328,25 +328,24 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
@@ -354,9 +353,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -377,83 +376,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [IPA_BCR] = &ipa_reg_ipa_bcr,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [COUNTER_CFG] = &ipa_reg_counter_cfg,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v3_5_1 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CTRL] = &reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
index 9a315130530d..29e71cce4a84 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.11.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -36,9 +36,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
[GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -73,9 +73,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
[DRBIP] = BIT(31),
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -86,24 +86,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -111,9 +111,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -124,9 +124,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -137,23 +137,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -166,9 +166,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 19-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -179,17 +179,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -199,26 +199,26 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
/* Bits 9-31 reserved */
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -229,10 +229,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -243,10 +243,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -257,10 +257,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -271,10 +271,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -283,16 +283,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -305,9 +305,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -321,12 +321,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -338,9 +338,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -355,27 +355,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -385,24 +385,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -410,9 +409,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -433,83 +432,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_11 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
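
[Editor's note, not part of this patch: each per-version file ends by publishing a sparse table like ipa_regs_v4_11 above, indexed by register ID. A minimal sketch of how such a table might be consumed follows; the helper name example_reg_lookup() is hypothetical, not the driver's actual API.]

/* Minimal sketch, assuming the struct regs layout shown above;
 * example_reg_lookup() is an illustrative name only.
 */
static const struct reg *example_reg_lookup(const struct regs *regs, u32 reg_id)
{
	/* The array is sparse (indexed by register ID), so IDs at or
	 * beyond reg_count, or entries left unset, do not exist.
	 */
	if (reg_id >= regs->reg_count)
		return NULL;

	return regs->reg[reg_id];	/* may be NULL for an absent register */
}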
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
index 7a95149f8ec7..bb7cf488144d 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.2.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -29,9 +29,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -65,9 +65,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -78,24 +78,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -103,9 +103,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -116,9 +116,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -129,33 +129,33 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_counter_cfg_fmask[] = {
+static const u32 reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
[AGGR_GRANULARITY] = GENMASK(8, 4),
/* Bits 9-31 reserved */
};
-IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -169,9 +169,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 20-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -182,17 +182,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -203,10 +203,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -217,10 +217,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -231,10 +231,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -245,10 +245,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -257,16 +257,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -279,9 +279,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
/* Bits 29-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -291,12 +291,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 14-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -308,9 +308,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
@@ -323,27 +323,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_SCALE] = GENMASK(12, 8),
/* Bits 13-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -353,25 +353,24 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
@@ -380,80 +379,80 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [IPA_BCR] = &ipa_reg_ipa_bcr,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [COUNTER_CFG] = &ipa_reg_counter_cfg,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_2 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_2 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
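
[Editor's note, not from this patch: the fmask[] arrays above pair each field with a BIT()/GENMASK() mask, which composes directly with the standard FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>. A hedged example for the NAT_EN field (bits 1:0 of ENDP_INIT_NAT); the function names are illustrative, not the driver's own wrappers.]

#include <linux/bitfield.h>

/* Illustrative only; assumes NAT_EN occupies bits 1:0 as declared above. */
static u32 example_nat_encode(u32 nat_type)
{
	return FIELD_PREP(GENMASK(1, 0), nat_type);	/* place value in bits 1:0 */
}

static u32 example_nat_decode(u32 val)
{
	return FIELD_GET(GENMASK(1, 0), val);		/* extract bits 1:0 */
}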
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
index 587eb8d4e00f..1c58f78851c2 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.5.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -30,9 +30,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -67,9 +67,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -80,24 +80,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -105,9 +105,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -118,9 +118,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -131,23 +131,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -159,9 +159,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 18-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -172,17 +172,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -192,25 +192,25 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -221,10 +221,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -235,10 +235,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -249,10 +249,10 @@ static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
- 0x00000408, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -263,10 +263,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -277,10 +277,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -291,10 +291,10 @@ static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
- 0x00000508, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -303,16 +303,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -325,9 +325,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -341,12 +341,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -357,9 +357,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -374,27 +374,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -404,24 +404,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(2, 0),
/* Bits 3-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -429,9 +428,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -452,85 +451,85 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_5 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
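
[Editor's note, not part of the patch: REG_STRIDE()/REG_STRIDE_FIELDS() declare a base offset plus a per-instance stride, so instance n of a register sits at base + stride * n. A minimal sketch of that arithmetic, with a hypothetical helper name.]

/* Hypothetical helper; mirrors the base/stride arithmetic implied by,
 * e.g., REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070).
 */
static u32 example_endp_reg_offset(u32 base, u32 stride, u32 endpoint_id)
{
	return base + stride * endpoint_id;	/* e.g. 0x840 + 0x70 * n */
}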
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.7.c b/drivers/net/ipa/reg/ipa_reg-v4.7.c
index 21f8a58e59a0..731824fce1d4 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.7.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.7.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -30,9 +30,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -67,9 +67,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
[DRBIP] = BIT(31),
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -80,24 +80,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -105,9 +105,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -118,9 +118,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -131,23 +131,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -160,9 +160,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 19-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -173,17 +173,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -193,25 +193,25 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -222,10 +222,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -236,10 +236,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -250,10 +250,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -264,10 +264,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -276,16 +276,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -298,9 +298,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -314,12 +314,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -330,9 +330,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -347,27 +347,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -377,24 +377,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -402,9 +401,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -425,83 +424,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_7 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_7 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
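
Each *_fmask[] table above maps a field enum to its GENMASK()/BIT() mask, and the REG_FIELDS()/REG_STRIDE_FIELDS() macros bind that table to a register name, offset and, for per-endpoint registers, stride. A minimal sketch of how such a table can be consumed follows; reg_field_encode() is a hypothetical helper for illustration, not the driver's actual API:

	/* Hypothetical helper: shift @val into the field described by
	 * fmask[field_id]; __ffs() locates the field's lowest set bit.
	 */
	static u32 reg_field_encode(const u32 *fmask, unsigned int field_id,
				    u32 val)
	{
		u32 mask = fmask[field_id];

		return (val << __ffs(mask)) & mask;
	}

For example, encoding 4 into GEN_QMB_1_MAX_WRITES (mask GENMASK(7, 4)) yields 4 << 4 = 0x40.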
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
index 1f67a03fe599..01f87b5290e0 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.9.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -35,9 +35,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
[GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -72,9 +72,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
[DRBIP] = BIT(31),
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -85,24 +85,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -110,9 +110,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -123,9 +123,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -136,23 +136,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -165,9 +165,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 19-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -178,17 +178,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -198,25 +198,25 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -227,10 +227,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -241,10 +241,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -255,10 +255,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -269,10 +269,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -281,16 +281,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -302,9 +302,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -318,12 +318,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -335,9 +335,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -352,27 +352,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -382,24 +382,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -407,9 +406,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -430,83 +429,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_9 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index bb1c298c1e78..460b3d4f2245 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -157,7 +157,7 @@ void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
return NULL;
ip4h = ip_hdr(skb);
- pktlen = ntohs(ip4h->tot_len);
+ pktlen = skb_ip_totlen(skb);
if (ip4h->ihl < 5 || ip4h->version != 4)
return NULL;
if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
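
The one-line fix above replaces a direct ntohs(ip4h->tot_len) read with skb_ip_totlen(), which also copes with IPv4 BIG TCP GSO packets whose tot_len field is zero. A sketch of the intended semantics, assuming the BIG TCP zero-tot_len convention (include/linux/ip.h carries the authoritative helper):

	static u32 totlen_sketch(const struct sk_buff *skb)
	{
		const struct iphdr *iph = ip_hdr(skb);
		u32 len = ntohs(iph->tot_len);

		/* Assumption: a zero tot_len on a GSO TCP skb marks a
		 * BIG TCP packet longer than 64KiB; recover the real
		 * length from the skb itself.
		 */
		if (!len && skb_is_gso(skb) && skb_is_gso_tcp(skb))
			len = skb->len - skb_network_offset(skb);

		return len;
	}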
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index bfa16826a6e1..90309980686e 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -215,6 +215,17 @@ config MDIO_BUS_MUX_MESON_G12A
the amlogic g12a SoC. The multiplexers connects either the external
or the internal MDIO bus to the parent bus.
+config MDIO_BUS_MUX_MESON_GXL
+ tristate "Amlogic GXL based MDIO bus multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
+ select MDIO_BUS_MUX
+ default m if ARCH_MESON
+ help
+ This module provides a driver for the MDIO multiplexer/glue of
+ the amlogic GXL SoC. The multiplexer connects either the external
+ or the internal MDIO bus to the parent bus.
+
config MDIO_BUS_MUX_BCM6368
tristate "Broadcom BCM6368 MDIO bus multiplexers"
depends on OF && OF_MDIO && (BMIPS_GENERIC || COMPILE_TEST)
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
index 15f8dc4042ce..7d4cb4c11e4e 100644
--- a/drivers/net/mdio/Makefile
+++ b/drivers/net/mdio/Makefile
@@ -28,5 +28,6 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM6368) += mdio-mux-bcm6368.o
obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
+obj-$(CONFIG_MDIO_BUS_MUX_MESON_GXL) += mdio-mux-meson-gxl.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index f0b554df62a9..910e5cf74e89 100644
--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -53,7 +53,6 @@
#define MESON_G12A_MDIO_INTERNAL_ID 1
struct g12a_mdio_mux {
- bool pll_is_enabled;
void __iomem *regs;
void *mux_handle;
struct clk *pll;
@@ -154,14 +153,12 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
int ret;
/* Enable the phy clock */
- if (!priv->pll_is_enabled) {
+ if (!__clk_is_enabled(priv->pll)) {
ret = clk_prepare_enable(priv->pll);
if (ret)
return ret;
}
- priv->pll_is_enabled = true;
-
/* Initialize ephy control */
writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
@@ -192,10 +189,8 @@ static int g12a_enable_external_mdio(struct g12a_mdio_mux *priv)
writel_relaxed(0x0, priv->regs + ETH_PHY_CNTL2);
/* Disable the phy clock if enabled */
- if (priv->pll_is_enabled) {
+ if (__clk_is_enabled(priv->pll))
clk_disable_unprepare(priv->pll);
- priv->pll_is_enabled = false;
- }
return 0;
}
@@ -347,7 +342,7 @@ static int g12a_mdio_mux_remove(struct platform_device *pdev)
mdio_mux_uninit(priv->mux_handle);
- if (priv->pll_is_enabled)
+ if (__clk_is_enabled(priv->pll))
clk_disable_unprepare(priv->pll);
return 0;
diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c
new file mode 100644
index 000000000000..76188575ca1f
--- /dev/null
+++ b/drivers/net/mdio/mdio-mux-meson-gxl.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Baylibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define ETH_REG2 0x0
+#define REG2_PHYID GENMASK(21, 0)
+#define EPHY_GXL_ID 0x110181
+#define REG2_LEDACT GENMASK(23, 22)
+#define REG2_LEDLINK GENMASK(25, 24)
+#define REG2_DIV4SEL BIT(27)
+#define REG2_ADCBYPASS BIT(30)
+#define REG2_CLKINSEL BIT(31)
+#define ETH_REG3 0x4
+#define REG3_ENH BIT(3)
+#define REG3_CFGMODE GENMASK(6, 4)
+#define REG3_AUTOMDIX BIT(7)
+#define REG3_PHYADDR GENMASK(12, 8)
+#define REG3_PWRUPRST BIT(21)
+#define REG3_PWRDOWN BIT(22)
+#define REG3_LEDPOL BIT(23)
+#define REG3_PHYMDI BIT(26)
+#define REG3_CLKINEN BIT(29)
+#define REG3_PHYIP BIT(30)
+#define REG3_PHYEN BIT(31)
+#define ETH_REG4 0x8
+#define REG4_PWRUPRSTSIG BIT(0)
+
+#define MESON_GXL_MDIO_EXTERNAL_ID 0
+#define MESON_GXL_MDIO_INTERNAL_ID 1
+
+struct gxl_mdio_mux {
+ void __iomem *regs;
+ void *mux_handle;
+};
+
+static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv)
+{
+ u32 val;
+
+ /* Setup the internal phy */
+ val = (REG3_ENH |
+ FIELD_PREP(REG3_CFGMODE, 0x7) |
+ REG3_AUTOMDIX |
+ FIELD_PREP(REG3_PHYADDR, 8) |
+ REG3_LEDPOL |
+ REG3_PHYMDI |
+ REG3_CLKINEN |
+ REG3_PHYIP);
+
+ writel(REG4_PWRUPRSTSIG, priv->regs + ETH_REG4);
+ writel(val, priv->regs + ETH_REG3);
+ mdelay(10);
+
+	/* NOTE: The HW keeps the phy id configurable at runtime.
+ * The id below is arbitrary. It is the one used in the vendor code.
+ * The only constraint is that it must match the one in
+ * drivers/net/phy/meson-gxl.c to properly match the PHY.
+ */
+ writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
+ priv->regs + ETH_REG2);
+
+ /* Enable the internal phy */
+ val |= REG3_PHYEN;
+ writel(val, priv->regs + ETH_REG3);
+ writel(0, priv->regs + ETH_REG4);
+
+ /* The phy needs a bit of time to power up */
+ mdelay(10);
+}
+
+static void gxl_enable_external_mdio(struct gxl_mdio_mux *priv)
+{
+ /* Reset the mdio bus mux to the external phy */
+ writel(0, priv->regs + ETH_REG3);
+}
+
+static int gxl_mdio_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct gxl_mdio_mux *priv = dev_get_drvdata(data);
+
+ if (current_child == desired_child)
+ return 0;
+
+ switch (desired_child) {
+ case MESON_GXL_MDIO_EXTERNAL_ID:
+ gxl_enable_external_mdio(priv);
+ break;
+ case MESON_GXL_MDIO_INTERNAL_ID:
+ gxl_enable_internal_mdio(priv);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id gxl_mdio_mux_match[] = {
+ { .compatible = "amlogic,gxl-mdio-mux", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gxl_mdio_mux_match);
+
+static int gxl_mdio_mux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gxl_mdio_mux *priv;
+ struct clk *rclk;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ rclk = devm_clk_get_enabled(dev, "ref");
+ if (IS_ERR(rclk))
+ return dev_err_probe(dev, PTR_ERR(rclk),
+ "failed to get reference clock\n");
+
+ ret = mdio_mux_init(dev, dev->of_node, gxl_mdio_switch_fn,
+ &priv->mux_handle, dev, NULL);
+ if (ret)
+ dev_err_probe(dev, ret, "mdio multiplexer init failed\n");
+
+ return ret;
+}
+
+static int gxl_mdio_mux_remove(struct platform_device *pdev)
+{
+ struct gxl_mdio_mux *priv = platform_get_drvdata(pdev);
+
+ mdio_mux_uninit(priv->mux_handle);
+
+ return 0;
+}
+
+static struct platform_driver gxl_mdio_mux_driver = {
+ .probe = gxl_mdio_mux_probe,
+ .remove = gxl_mdio_mux_remove,
+ .driver = {
+ .name = "gxl-mdio-mux",
+ .of_match_table = gxl_mdio_mux_match,
+ },
+};
+module_platform_driver(gxl_mdio_mux_driver);
+
+MODULE_DESCRIPTION("Amlogic GXL MDIO multiplexer driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index f88095b0f836..6045bece2654 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -1609,7 +1609,6 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
goto err_hwstats_exit;
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devl_unlock(devlink);
return 0;
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index c1424119e821..323bec5e57f8 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -121,15 +121,11 @@ static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
* struct miic - MII converter structure
* @base: base address of the MII converter
* @dev: Device associated to the MII converter
- * @clks: Clocks used for this device
- * @nclk: Number of clocks
* @lock: Lock used for read-modify-write access
*/
struct miic {
void __iomem *base;
struct device *dev;
- struct clk_bulk_data *clks;
- int nclk;
spinlock_t lock;
};
@@ -232,7 +228,7 @@ static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
}
miic_reg_rmw(miic, MIIC_CONVCTRL(port), mask, val);
- miic_converter_enable(miic_port->miic, miic_port->port, 1);
+ miic_converter_enable(miic, miic_port->port, 1);
return 0;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index f5df2edc94a5..54874555c921 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -257,7 +257,7 @@ config MOTORCOMM_PHY
tristate "Motorcomm PHYs"
help
Enables support for Motorcomm network PHYs.
- Currently supports the YT8511, YT8521, YT8531S Gigabit Ethernet PHYs.
+ Currently supports YT85xx Gigabit Ethernet PHYs.
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index a6f05e35d91f..b7cb71817780 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -233,7 +233,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- if (!dp83822->fx_enabled)
+ /* Private data pointer is NULL on DP83825/26 */
+ if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
DP83822_SPEED_CHANGED_INT_EN;
@@ -253,7 +254,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- if (!dp83822->fx_enabled)
+ /* Private data pointer is NULL on DP83825/26 */
+ if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index c49062ad72c6..a6015cd03bff 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -261,6 +261,8 @@ static struct phy_driver meson_gxl_phy[] = {
.handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
}, {
PHY_ID_MATCH_EXACT(0x01803301),
.name = "Meson G12A Internal PHY",
@@ -271,6 +273,8 @@ static struct phy_driver meson_gxl_phy[] = {
.handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
},
};
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index d5b80c31ab91..727de4f4a14d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -268,6 +268,9 @@ struct kszphy_type {
u16 interrupt_level_mask;
u16 cable_diag_reg;
unsigned long pair_mask;
+ u16 disable_dll_tx_bit;
+ u16 disable_dll_rx_bit;
+ u16 disable_dll_mask;
bool has_broadcast_disable;
bool has_nand_tree_disable;
bool has_rmii_ref_clk_sel;
@@ -364,6 +367,21 @@ static const struct kszphy_type ksz9021_type = {
.interrupt_level_mask = BIT(14),
};
+static const struct kszphy_type ksz9131_type = {
+ .interrupt_level_mask = BIT(14),
+ .disable_dll_tx_bit = BIT(12),
+ .disable_dll_rx_bit = BIT(12),
+ .disable_dll_mask = BIT_MASK(12),
+};
+
+static const struct kszphy_type lan8841_type = {
+ .disable_dll_tx_bit = BIT(14),
+ .disable_dll_rx_bit = BIT(14),
+ .disable_dll_mask = BIT_MASK(14),
+ .cable_diag_reg = LAN8814_CABLE_DIAG,
+ .pair_mask = LAN8814_WIRE_PAIR_MASK,
+};
+
static int kszphy_extended_write(struct phy_device *phydev,
u32 regnum, u16 val)
{
@@ -1172,19 +1190,18 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev,
#define KSZ9131RN_MMD_COMMON_CTRL_REG 2
#define KSZ9131RN_RXC_DLL_CTRL 76
#define KSZ9131RN_TXC_DLL_CTRL 77
-#define KSZ9131RN_DLL_CTRL_BYPASS BIT_MASK(12)
#define KSZ9131RN_DLL_ENABLE_DELAY 0
-#define KSZ9131RN_DLL_DISABLE_DELAY BIT(12)
static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
u16 rxcdll_val, txcdll_val;
int ret;
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
- rxcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
- txcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ rxcdll_val = type->disable_dll_rx_bit;
+ txcdll_val = type->disable_dll_tx_bit;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
rxcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
@@ -1192,10 +1209,10 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
rxcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
- txcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ txcdll_val = type->disable_dll_tx_bit;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
- rxcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ rxcdll_val = type->disable_dll_rx_bit;
txcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
break;
default:
@@ -1203,13 +1220,13 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
}
ret = phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
- KSZ9131RN_RXC_DLL_CTRL, KSZ9131RN_DLL_CTRL_BYPASS,
+ KSZ9131RN_RXC_DLL_CTRL, type->disable_dll_mask,
rxcdll_val);
if (ret < 0)
return ret;
return phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
- KSZ9131RN_TXC_DLL_CTRL, KSZ9131RN_DLL_CTRL_BYPASS,
+ KSZ9131RN_TXC_DLL_CTRL, type->disable_dll_mask,
txcdll_val);
}
@@ -3152,6 +3169,146 @@ static int lan8814_probe(struct phy_device *phydev)
return 0;
}
+#define LAN8841_MMD_TIMER_REG 0
+#define LAN8841_MMD0_REGISTER_17 17
+#define LAN8841_MMD0_REGISTER_17_DROP_OPT(x) ((x) & 0x3)
+#define LAN8841_MMD0_REGISTER_17_XMIT_TOG_TX_DIS BIT(3)
+#define LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG 2
+#define LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG_MAGJACK BIT(14)
+#define LAN8841_MMD_ANALOG_REG 28
+#define LAN8841_ANALOG_CONTROL_1 1
+#define LAN8841_ANALOG_CONTROL_1_PLL_TRIM(x) (((x) & 0x3) << 5)
+#define LAN8841_ANALOG_CONTROL_10 13
+#define LAN8841_ANALOG_CONTROL_10_PLL_DIV(x) ((x) & 0x3)
+#define LAN8841_ANALOG_CONTROL_11 14
+#define LAN8841_ANALOG_CONTROL_11_LDO_REF(x) (((x) & 0x7) << 12)
+#define LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT 69
+#define LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT_VAL 0xbffc
+#define LAN8841_BTRX_POWER_DOWN 70
+#define LAN8841_BTRX_POWER_DOWN_QBIAS_CH_A BIT(0)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_A BIT(1)
+#define LAN8841_BTRX_POWER_DOWN_QBIAS_CH_B BIT(2)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_B BIT(3)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_C BIT(5)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_D BIT(7)
+#define LAN8841_ADC_CHANNEL_MASK 198
+
+static int lan8841_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz9131_config_init(phydev);
+ if (ret)
+ return ret;
+
+	/* 100BT Clause 40 improvement errata */
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_ANALOG_CONTROL_1,
+ LAN8841_ANALOG_CONTROL_1_PLL_TRIM(0x2));
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_ANALOG_CONTROL_10,
+ LAN8841_ANALOG_CONTROL_10_PLL_DIV(0x1));
+
+ /* 10M/100M Ethernet Signal Tuning Errata for Shorted-Center Tap
+ * Magnetics
+ */
+ ret = phy_read_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG);
+ if (ret & LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG_MAGJACK) {
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT,
+ LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT_VAL);
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_BTRX_POWER_DOWN,
+ LAN8841_BTRX_POWER_DOWN_QBIAS_CH_A |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_A |
+ LAN8841_BTRX_POWER_DOWN_QBIAS_CH_B |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_B |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_C |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_D);
+ }
+
+ /* LDO Adjustment errata */
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_ANALOG_CONTROL_11,
+ LAN8841_ANALOG_CONTROL_11_LDO_REF(1));
+
+ /* 100BT RGMII latency tuning errata */
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
+ LAN8841_ADC_CHANNEL_MASK, 0x0);
+ phy_write_mmd(phydev, LAN8841_MMD_TIMER_REG,
+ LAN8841_MMD0_REGISTER_17,
+ LAN8841_MMD0_REGISTER_17_DROP_OPT(2) |
+ LAN8841_MMD0_REGISTER_17_XMIT_TOG_TX_DIS);
+
+ return 0;
+}
+
+#define LAN8841_OUTPUT_CTRL 25
+#define LAN8841_OUTPUT_CTRL_INT_BUFFER BIT(14)
+
+static int lan8841_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ phy_modify(phydev, LAN8841_OUTPUT_CTRL,
+ LAN8841_OUTPUT_CTRL_INT_BUFFER, 0);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+		err = phy_read(phydev, LAN8814_INTS);
+		if (err < 0)
+			return err;
+
+ err = phy_write(phydev, LAN8814_INTC,
+ LAN8814_INT_LINK);
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+		err = phy_read(phydev, LAN8814_INTS);
+		if (err < 0)
+			return err;
+
+		err = 0;
+	}
+
+ return err;
+}
+
+static irqreturn_t lan8841_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, LAN8814_INTS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status & LAN8814_INT_LINK) {
+ phy_trigger_machine(phydev);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+#define LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER 3
+#define LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER_STRAP_RGMII_EN BIT(0)
+
+static int lan8841_probe(struct phy_device *phydev)
+{
+ int err;
+
+ err = kszphy_probe(phydev);
+ if (err)
+ return err;
+
+ if (phy_read_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER) &
+ LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER_STRAP_RGMII_EN)
+ phydev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
+
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -3362,12 +3519,30 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = lan8804_config_intr,
.handle_interrupt = lan8804_handle_interrupt,
}, {
+ .phy_id = PHY_ID_LAN8841,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip LAN8841 Gigabit PHY",
+ .flags = PHY_POLL_CABLE_TEST,
+ .driver_data = &lan8841_type,
+ .config_init = lan8841_config_init,
+ .probe = lan8841_probe,
+ .soft_reset = genphy_soft_reset,
+ .config_intr = lan8841_config_intr,
+ .handle_interrupt = lan8841_handle_interrupt,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .cable_test_start = lan8814_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
+}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
- .driver_data = &ksz9021_type,
+ .driver_data = &ksz9131_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
.config_intr = kszphy_config_intr,
@@ -3446,6 +3621,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 685190db72de..ee7c37dfdca0 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Motorcomm 8511/8521/8531S PHY driver.
+ * Motorcomm 8511/8521/8531/8531S PHY driver.
*
* Author: Peter Geis <pgwipeout@gmail.com>
* Author: Frank <Frank.Sae@motor-comm.com>
@@ -10,10 +10,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/of.h>
#define PHY_ID_YT8511 0x0000010a
-#define PHY_ID_YT8521 0x0000011A
-#define PHY_ID_YT8531S 0x4F51E91A
+#define PHY_ID_YT8521 0x0000011a
+#define PHY_ID_YT8531 0x4f51e91b
+#define PHY_ID_YT8531S 0x4f51e91a
/* YT8521/YT8531S Register Overview
* UTP Register space | FIBER Register space
@@ -161,6 +163,11 @@
#define YT8521_CHIP_CONFIG_REG 0xA001
#define YT8521_CCR_SW_RST BIT(15)
+/* 1b0 disable 1.9ns rxc clock delay *default*
+ * 1b1 enable 1.9ns rxc clock delay
+ */
+#define YT8521_CCR_RXC_DLY_EN BIT(8)
+#define YT8521_CCR_RXC_DLY_1_900_NS 1900
#define YT8521_CCR_MODE_SEL_MASK (BIT(2) | BIT(1) | BIT(0))
#define YT8521_CCR_MODE_UTP_TO_RGMII 0
@@ -178,22 +185,29 @@
#define YT8521_MODE_POLL 0x3
#define YT8521_RGMII_CONFIG1_REG 0xA003
-
-/* TX Gig-E Delay is bits 3:0, default 0x1
- * TX Fast-E Delay is bits 7:4, default 0xf
- * RX Delay is bits 13:10, default 0x0
- * Delay = 150ps * N
- * On = 2250ps, off = 0ps
+/* 1b0 use original tx_clk_rgmii *default*
+ * 1b1 use inverted tx_clk_rgmii.
*/
-#define YT8521_RC1R_RX_DELAY_MASK (0xF << 10)
-#define YT8521_RC1R_RX_DELAY_EN (0xF << 10)
-#define YT8521_RC1R_RX_DELAY_DIS (0x0 << 10)
-#define YT8521_RC1R_FE_TX_DELAY_MASK (0xF << 4)
-#define YT8521_RC1R_FE_TX_DELAY_EN (0xF << 4)
-#define YT8521_RC1R_FE_TX_DELAY_DIS (0x0 << 4)
-#define YT8521_RC1R_GE_TX_DELAY_MASK (0xF << 0)
-#define YT8521_RC1R_GE_TX_DELAY_EN (0xF << 0)
-#define YT8521_RC1R_GE_TX_DELAY_DIS (0x0 << 0)
+#define YT8521_RC1R_TX_CLK_SEL_INVERTED BIT(14)
+#define YT8521_RC1R_RX_DELAY_MASK GENMASK(13, 10)
+#define YT8521_RC1R_FE_TX_DELAY_MASK GENMASK(7, 4)
+#define YT8521_RC1R_GE_TX_DELAY_MASK GENMASK(3, 0)
+#define YT8521_RC1R_RGMII_0_000_NS 0
+#define YT8521_RC1R_RGMII_0_150_NS 1
+#define YT8521_RC1R_RGMII_0_300_NS 2
+#define YT8521_RC1R_RGMII_0_450_NS 3
+#define YT8521_RC1R_RGMII_0_600_NS 4
+#define YT8521_RC1R_RGMII_0_750_NS 5
+#define YT8521_RC1R_RGMII_0_900_NS 6
+#define YT8521_RC1R_RGMII_1_050_NS 7
+#define YT8521_RC1R_RGMII_1_200_NS 8
+#define YT8521_RC1R_RGMII_1_350_NS 9
+#define YT8521_RC1R_RGMII_1_500_NS 10
+#define YT8521_RC1R_RGMII_1_650_NS 11
+#define YT8521_RC1R_RGMII_1_800_NS 12
+#define YT8521_RC1R_RGMII_1_950_NS 13
+#define YT8521_RC1R_RGMII_2_100_NS 14
+#define YT8521_RC1R_RGMII_2_250_NS 15
#define YTPHY_MISC_CONFIG_REG 0xA006
#define YTPHY_MCR_FIBER_SPEED_MASK BIT(0)
@@ -222,11 +236,36 @@
*/
#define YTPHY_WCR_TYPE_PULSE BIT(0)
-#define YT8531S_SYNCE_CFG_REG 0xA012
-#define YT8531S_SCR_SYNCE_ENABLE BIT(6)
+#define YTPHY_SYNCE_CFG_REG 0xA012
+#define YT8521_SCR_SYNCE_ENABLE BIT(5)
+/* 1b0 output 25m clock
+ * 1b1 output 125m clock *default*
+ */
+#define YT8521_SCR_CLK_FRE_SEL_125M BIT(3)
+#define YT8521_SCR_CLK_SRC_MASK GENMASK(2, 1)
+#define YT8521_SCR_CLK_SRC_PLL_125M 0
+#define YT8521_SCR_CLK_SRC_UTP_RX 1
+#define YT8521_SCR_CLK_SRC_SDS_RX 2
+#define YT8521_SCR_CLK_SRC_REF_25M 3
+#define YT8531_SCR_SYNCE_ENABLE BIT(6)
+/* 1b0 output 25m clock *default*
+ * 1b1 output 125m clock
+ */
+#define YT8531_SCR_CLK_FRE_SEL_125M BIT(4)
+#define YT8531_SCR_CLK_SRC_MASK GENMASK(3, 1)
+#define YT8531_SCR_CLK_SRC_PLL_125M 0
+#define YT8531_SCR_CLK_SRC_UTP_RX 1
+#define YT8531_SCR_CLK_SRC_SDS_RX 2
+#define YT8531_SCR_CLK_SRC_CLOCK_FROM_DIGITAL 3
+#define YT8531_SCR_CLK_SRC_REF_25M 4
+#define YT8531_SCR_CLK_SRC_SSC_25M 5
/* Extended Register end */
+#define YTPHY_DTS_OUTPUT_CLK_DIS 0
+#define YTPHY_DTS_OUTPUT_CLK_25M 25000000
+#define YTPHY_DTS_OUTPUT_CLK_125M 125000000
+
struct yt8521_priv {
/* combo_advertising is used for case of YT8521 in combo mode,
* this means that yt8521 may work in utp or fiber mode which depends
@@ -479,6 +518,61 @@ err_restore_page:
return phy_restore_page(phydev, old_page, ret);
}
+static int yt8531_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ const u16 mac_addr_reg[] = {
+ YTPHY_WOL_MACADDR2_REG,
+ YTPHY_WOL_MACADDR1_REG,
+ YTPHY_WOL_MACADDR0_REG,
+ };
+ const u8 *mac_addr;
+ u16 mask, val;
+ int ret;
+ u8 i;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ mac_addr = phydev->attached_dev->dev_addr;
+
+ /* Store the device address for the magic packet */
+ for (i = 0; i < 3; i++) {
+			ret = ytphy_write_ext_with_lock(phydev, mac_addr_reg[i],
+							(mac_addr[i * 2] << 8) |
+							mac_addr[i * 2 + 1]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable WOL feature */
+ mask = YTPHY_WCR_PULSE_WIDTH_MASK | YTPHY_WCR_INTR_SEL;
+ val = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL;
+ val |= YTPHY_WCR_TYPE_PULSE | YTPHY_WCR_PULSE_WIDTH_672MS;
+ ret = ytphy_modify_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG,
+ mask, val);
+ if (ret < 0)
+ return ret;
+
+ /* Enable WOL interrupt */
+ ret = phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG, 0,
+ YTPHY_IER_WOL);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Disable WOL feature */
+ mask = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL;
+		ret = ytphy_modify_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG,
+						 mask, 0);
+		if (ret < 0)
+			return ret;
+
+ /* Disable WOL interrupt */
+ ret = phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG,
+ YTPHY_IER_WOL, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int yt8511_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, YT8511_PAGE_SELECT);
@@ -594,6 +688,153 @@ static int yt8521_write_page(struct phy_device *phydev, int page)
};
/**
+ * struct ytphy_cfg_reg_map - map a config value to a register value
+ * @cfg: value in device configuration
+ * @reg: value in the register
+ */
+struct ytphy_cfg_reg_map {
+ u32 cfg;
+ u32 reg;
+};
+
+static const struct ytphy_cfg_reg_map ytphy_rgmii_delays[] = {
+	/* tx and rx delay values used when YT8521_CCR_RXC_DLY_EN is not set. */
+ { 0, YT8521_RC1R_RGMII_0_000_NS },
+ { 150, YT8521_RC1R_RGMII_0_150_NS },
+ { 300, YT8521_RC1R_RGMII_0_300_NS },
+ { 450, YT8521_RC1R_RGMII_0_450_NS },
+ { 600, YT8521_RC1R_RGMII_0_600_NS },
+ { 750, YT8521_RC1R_RGMII_0_750_NS },
+ { 900, YT8521_RC1R_RGMII_0_900_NS },
+ { 1050, YT8521_RC1R_RGMII_1_050_NS },
+ { 1200, YT8521_RC1R_RGMII_1_200_NS },
+ { 1350, YT8521_RC1R_RGMII_1_350_NS },
+ { 1500, YT8521_RC1R_RGMII_1_500_NS },
+ { 1650, YT8521_RC1R_RGMII_1_650_NS },
+ { 1800, YT8521_RC1R_RGMII_1_800_NS },
+ { 1950, YT8521_RC1R_RGMII_1_950_NS }, /* default tx/rx delay */
+ { 2100, YT8521_RC1R_RGMII_2_100_NS },
+ { 2250, YT8521_RC1R_RGMII_2_250_NS },
+
+	/* rx delay values only, used when YT8521_CCR_RXC_DLY_EN is set. */
+ { 0 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_000_NS },
+ { 150 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_150_NS },
+ { 300 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_300_NS },
+ { 450 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_450_NS },
+ { 600 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_600_NS },
+ { 750 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_750_NS },
+ { 900 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_900_NS },
+ { 1050 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_050_NS },
+ { 1200 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_200_NS },
+ { 1350 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_350_NS },
+ { 1500 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_500_NS },
+ { 1650 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_650_NS },
+ { 1800 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_800_NS },
+ { 1950 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_950_NS },
+ { 2100 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_2_100_NS },
+ { 2250 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_2_250_NS }
+};
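The second half of the table offsets the same fine steps by the coarse 1.9 ns stage. A hedged sketch of that decomposition (assuming YT8521_CCR_RXC_DLY_1_900_NS expands to 1900 ps; the helper name is hypothetical):

/* Split a total rx delay into the optional coarse 1.9 ns stage plus a
 * fine step of 0..2250 ps; only the discrete 150 ps increments listed
 * in the table above are actually representable.
 */
static u32 ytphy_demo_rx_delay_split(u32 total_ps, bool *coarse_en)
{
	*coarse_en = total_ps > 2250;
	return *coarse_en ? total_ps - 1900 : total_ps;
}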
+
+static u32 ytphy_get_delay_reg_value(struct phy_device *phydev,
+ const char *prop_name,
+ const struct ytphy_cfg_reg_map *tbl,
+ int tb_size,
+ u16 *rxc_dly_en,
+ u32 dflt)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ int tb_size_half = tb_size / 2;
+ u32 val;
+ int i;
+
+ if (of_property_read_u32(node, prop_name, &val))
+ goto err_dts_val;
+
+	/* When rxc_dly_en is NULL, we are looking up the tx delay, for which
+	 * only the first half of the table is valid.
+	 */
+ if (!rxc_dly_en)
+ tb_size = tb_size_half;
+
+ for (i = 0; i < tb_size; i++) {
+ if (tbl[i].cfg == val) {
+ if (rxc_dly_en && i < tb_size_half)
+ *rxc_dly_en = 0;
+ return tbl[i].reg;
+ }
+ }
+
+	phydev_warn(phydev, "Unsupported value %u for %s, using default (%u)\n",
+		    val, prop_name, dflt);
+
+err_dts_val:
+	/* When rxc_dly_en is not NULL, we are looking up the rx delay.
+ * The rx default in dts and ytphy_rgmii_clk_delay_config is 1950 ps,
+ * so YT8521_CCR_RXC_DLY_EN should not be set.
+ */
+ if (rxc_dly_en)
+ *rxc_dly_en = 0;
+
+ return dflt;
+}
+
+static int ytphy_rgmii_clk_delay_config(struct phy_device *phydev)
+{
+ int tb_size = ARRAY_SIZE(ytphy_rgmii_delays);
+ u16 rxc_dly_en = YT8521_CCR_RXC_DLY_EN;
+ u32 rx_reg, tx_reg;
+ u16 mask, val = 0;
+ int ret;
+
+ rx_reg = ytphy_get_delay_reg_value(phydev, "rx-internal-delay-ps",
+ ytphy_rgmii_delays, tb_size,
+ &rxc_dly_en,
+ YT8521_RC1R_RGMII_1_950_NS);
+ tx_reg = ytphy_get_delay_reg_value(phydev, "tx-internal-delay-ps",
+ ytphy_rgmii_delays, tb_size, NULL,
+ YT8521_RC1R_RGMII_1_950_NS);
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ rxc_dly_en = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val |= FIELD_PREP(YT8521_RC1R_RX_DELAY_MASK, rx_reg);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ rxc_dly_en = 0;
+ val |= FIELD_PREP(YT8521_RC1R_GE_TX_DELAY_MASK, tx_reg);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val |= FIELD_PREP(YT8521_RC1R_RX_DELAY_MASK, rx_reg) |
+ FIELD_PREP(YT8521_RC1R_GE_TX_DELAY_MASK, tx_reg);
+ break;
+ default: /* do not support other modes */
+ return -EOPNOTSUPP;
+ }
+
+ ret = ytphy_modify_ext(phydev, YT8521_CHIP_CONFIG_REG,
+ YT8521_CCR_RXC_DLY_EN, rxc_dly_en);
+ if (ret < 0)
+ return ret;
+
+ /* Generally, it is not necessary to adjust YT8521_RC1R_FE_TX_DELAY */
+ mask = YT8521_RC1R_RX_DELAY_MASK | YT8521_RC1R_GE_TX_DELAY_MASK;
+ return ytphy_modify_ext(phydev, YT8521_RGMII_CONFIG1_REG, mask, val);
+}
+
+static int ytphy_rgmii_clk_delay_config_with_lock(struct phy_device *phydev)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = ytphy_rgmii_clk_delay_config(phydev);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
* yt8521_probe() - read chip config then set suitable polling_mode
* @phydev: a pointer to a &struct phy_device
*
@@ -601,9 +842,12 @@ static int yt8521_write_page(struct phy_device *phydev, int page)
*/
static int yt8521_probe(struct phy_device *phydev)
{
+ struct device_node *node = phydev->mdio.dev.of_node;
struct device *dev = &phydev->mdio.dev;
struct yt8521_priv *priv;
int chip_config;
+ u16 mask, val;
+ u32 freq;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -648,27 +892,107 @@ static int yt8521_probe(struct phy_device *phydev)
return ret;
}
- return 0;
+ if (of_property_read_u32(node, "motorcomm,clk-out-frequency-hz", &freq))
+ freq = YTPHY_DTS_OUTPUT_CLK_DIS;
+
+ if (phydev->drv->phy_id == PHY_ID_YT8521) {
+ switch (freq) {
+ case YTPHY_DTS_OUTPUT_CLK_DIS:
+ mask = YT8521_SCR_SYNCE_ENABLE;
+ val = 0;
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_25M:
+ mask = YT8521_SCR_SYNCE_ENABLE |
+ YT8521_SCR_CLK_SRC_MASK |
+ YT8521_SCR_CLK_FRE_SEL_125M;
+ val = YT8521_SCR_SYNCE_ENABLE |
+ FIELD_PREP(YT8521_SCR_CLK_SRC_MASK,
+ YT8521_SCR_CLK_SRC_REF_25M);
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_125M:
+ mask = YT8521_SCR_SYNCE_ENABLE |
+ YT8521_SCR_CLK_SRC_MASK |
+ YT8521_SCR_CLK_FRE_SEL_125M;
+ val = YT8521_SCR_SYNCE_ENABLE |
+ YT8521_SCR_CLK_FRE_SEL_125M |
+ FIELD_PREP(YT8521_SCR_CLK_SRC_MASK,
+ YT8521_SCR_CLK_SRC_PLL_125M);
+ break;
+ default:
+			phydev_warn(phydev, "Unsupported clk-out frequency %u Hz\n", freq);
+ return -EINVAL;
+ }
+ } else if (phydev->drv->phy_id == PHY_ID_YT8531S) {
+ switch (freq) {
+ case YTPHY_DTS_OUTPUT_CLK_DIS:
+ mask = YT8531_SCR_SYNCE_ENABLE;
+ val = 0;
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_25M:
+ mask = YT8531_SCR_SYNCE_ENABLE |
+ YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_REF_25M);
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_125M:
+ mask = YT8531_SCR_SYNCE_ENABLE |
+ YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE |
+ YT8531_SCR_CLK_FRE_SEL_125M |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_PLL_125M);
+ break;
+ default:
+			phydev_warn(phydev, "Unsupported clk-out frequency %u Hz\n", freq);
+ return -EINVAL;
+ }
+ } else {
+		phydev_warn(phydev, "Unexpected PHY id\n");
+ return -EINVAL;
+ }
+
+ return ytphy_modify_ext_with_lock(phydev, YTPHY_SYNCE_CFG_REG, mask,
+ val);
}
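As a reading aid for the mask/val pairs above, a sketch of the read-modify-write they feed (the demo helper is hypothetical; ytphy_modify_ext itself is defined earlier in the file): a bit named in mask but absent from val is cleared, which is how the 25 MHz cases drop the 125 MHz frequency-select bit.

/* phy_modify()-style update: clear the masked bits, then OR in val */
static u16 ytphy_demo_modify(u16 old, u16 mask, u16 val)
{
	return (old & ~mask) | val;
}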
-/**
- * yt8531s_probe() - read chip config then set suitable polling_mode
- * @phydev: a pointer to a &struct phy_device
- *
- * returns 0 or negative errno code
- */
-static int yt8531s_probe(struct phy_device *phydev)
+static int yt8531_probe(struct phy_device *phydev)
{
- int ret;
+ struct device_node *node = phydev->mdio.dev.of_node;
+ u16 mask, val;
+ u32 freq;
- /* Disable SyncE clock output by default */
- ret = ytphy_modify_ext_with_lock(phydev, YT8531S_SYNCE_CFG_REG,
- YT8531S_SCR_SYNCE_ENABLE, 0);
- if (ret < 0)
- return ret;
+ if (of_property_read_u32(node, "motorcomm,clk-out-frequency-hz", &freq))
+ freq = YTPHY_DTS_OUTPUT_CLK_DIS;
+
+ switch (freq) {
+ case YTPHY_DTS_OUTPUT_CLK_DIS:
+ mask = YT8531_SCR_SYNCE_ENABLE;
+ val = 0;
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_25M:
+ mask = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_REF_25M);
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_125M:
+ mask = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_FRE_SEL_125M |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_PLL_125M);
+ break;
+ default:
+		phydev_warn(phydev, "Unsupported clk-out frequency %u Hz\n", freq);
+ return -EINVAL;
+ }
- /* same as yt8521_probe */
- return yt8521_probe(phydev);
+ return ytphy_modify_ext_with_lock(phydev, YTPHY_SYNCE_CFG_REG, mask,
+ val);
}
/**
@@ -1133,65 +1457,128 @@ static int yt8521_resume(struct phy_device *phydev)
*/
static int yt8521_config_init(struct phy_device *phydev)
{
+ struct device_node *node = phydev->mdio.dev.of_node;
int old_page;
int ret = 0;
- u16 val;
old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
if (old_page < 0)
goto err_restore_page;
- switch (phydev->interface) {
- case PHY_INTERFACE_MODE_RGMII:
- val = YT8521_RC1R_GE_TX_DELAY_DIS | YT8521_RC1R_FE_TX_DELAY_DIS;
- val |= YT8521_RC1R_RX_DELAY_DIS;
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- val = YT8521_RC1R_GE_TX_DELAY_DIS | YT8521_RC1R_FE_TX_DELAY_DIS;
- val |= YT8521_RC1R_RX_DELAY_EN;
- break;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- val = YT8521_RC1R_GE_TX_DELAY_EN | YT8521_RC1R_FE_TX_DELAY_EN;
- val |= YT8521_RC1R_RX_DELAY_DIS;
- break;
- case PHY_INTERFACE_MODE_RGMII_ID:
- val = YT8521_RC1R_GE_TX_DELAY_EN | YT8521_RC1R_FE_TX_DELAY_EN;
- val |= YT8521_RC1R_RX_DELAY_EN;
- break;
- case PHY_INTERFACE_MODE_SGMII:
- break;
- default: /* do not support other modes */
- ret = -EOPNOTSUPP;
- goto err_restore_page;
- }
-
/* set rgmii delay mode */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII) {
- ret = ytphy_modify_ext(phydev, YT8521_RGMII_CONFIG1_REG,
- (YT8521_RC1R_RX_DELAY_MASK |
- YT8521_RC1R_FE_TX_DELAY_MASK |
- YT8521_RC1R_GE_TX_DELAY_MASK),
- val);
+ ret = ytphy_rgmii_clk_delay_config(phydev);
if (ret < 0)
goto err_restore_page;
}
- /* disable auto sleep */
- ret = ytphy_modify_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1_REG,
- YT8521_ESC1R_SLEEP_SW, 0);
- if (ret < 0)
- goto err_restore_page;
-
- /* enable RXC clock when no wire plug */
- ret = ytphy_modify_ext(phydev, YT8521_CLOCK_GATING_REG,
- YT8521_CGR_RX_CLK_EN, 0);
- if (ret < 0)
- goto err_restore_page;
+ if (of_property_read_bool(node, "motorcomm,auto-sleep-disabled")) {
+ /* disable auto sleep */
+ ret = ytphy_modify_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1_REG,
+ YT8521_ESC1R_SLEEP_SW, 0);
+ if (ret < 0)
+ goto err_restore_page;
+ }
+ if (of_property_read_bool(node, "motorcomm,keep-pll-enabled")) {
+ /* enable RXC clock when no wire plug */
+ ret = ytphy_modify_ext(phydev, YT8521_CLOCK_GATING_REG,
+ YT8521_CGR_RX_CLK_EN, 0);
+ if (ret < 0)
+ goto err_restore_page;
+ }
err_restore_page:
return phy_restore_page(phydev, old_page, ret);
}
+static int yt8531_config_init(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ int ret;
+
+ ret = ytphy_rgmii_clk_delay_config_with_lock(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (of_property_read_bool(node, "motorcomm,auto-sleep-disabled")) {
+ /* disable auto sleep */
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YT8521_EXTREG_SLEEP_CONTROL1_REG,
+ YT8521_ESC1R_SLEEP_SW, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (of_property_read_bool(node, "motorcomm,keep-pll-enabled")) {
+ /* enable RXC clock when no wire plug */
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YT8521_CLOCK_GATING_REG,
+ YT8521_CGR_RX_CLK_EN, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * yt8531_link_change_notify() - Adjust the tx clock direction according to
+ * the current speed and dts config.
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * NOTE: This function is only used to adapt to VF2 with the JH7110 SoC.
+ * Please do not add "motorcomm,tx-clk-adj-enabled" to the dts when the SoC
+ * is not JH7110.
+ */
+static void yt8531_link_change_notify(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ bool tx_clk_adj_enabled = false;
+	bool tx_clk_1000_inverted = false;
+	bool tx_clk_100_inverted = false;
+	bool tx_clk_10_inverted = false;
+ u16 val = 0;
+ int ret;
+
+ if (of_property_read_bool(node, "motorcomm,tx-clk-adj-enabled"))
+ tx_clk_adj_enabled = true;
+
+ if (!tx_clk_adj_enabled)
+ return;
+
+ if (of_property_read_bool(node, "motorcomm,tx-clk-10-inverted"))
+ tx_clk_10_inverted = true;
+ if (of_property_read_bool(node, "motorcomm,tx-clk-100-inverted"))
+ tx_clk_100_inverted = true;
+ if (of_property_read_bool(node, "motorcomm,tx-clk-1000-inverted"))
+ tx_clk_1000_inverted = true;
+
+ if (phydev->speed < 0)
+ return;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ if (tx_clk_1000_inverted)
+ val = YT8521_RC1R_TX_CLK_SEL_INVERTED;
+ break;
+ case SPEED_100:
+ if (tx_clk_100_inverted)
+ val = YT8521_RC1R_TX_CLK_SEL_INVERTED;
+ break;
+ case SPEED_10:
+ if (tx_clk_10_inverted)
+ val = YT8521_RC1R_TX_CLK_SEL_INVERTED;
+ break;
+ default:
+ return;
+ }
+
+ ret = ytphy_modify_ext_with_lock(phydev, YT8521_RGMII_CONFIG1_REG,
+ YT8521_RC1R_TX_CLK_SEL_INVERTED, val);
+ if (ret < 0)
+ phydev_warn(phydev, "Modify TX_CLK_SEL err:%d\n", ret);
+}
+
/**
* yt8521_prepare_fiber_features() - A small helper function that setup
* fiber's features.
@@ -1775,10 +2162,21 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.resume = yt8521_resume,
},
{
+ PHY_ID_MATCH_EXACT(PHY_ID_YT8531),
+ .name = "YT8531 Gigabit Ethernet",
+ .probe = yt8531_probe,
+ .config_init = yt8531_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .get_wol = ytphy_get_wol,
+ .set_wol = yt8531_set_wol,
+ .link_change_notify = yt8531_link_change_notify,
+ },
+ {
PHY_ID_MATCH_EXACT(PHY_ID_YT8531S),
.name = "YT8531S Gigabit Ethernet",
.get_features = yt8521_get_features,
- .probe = yt8531s_probe,
+ .probe = yt8521_probe,
.read_page = yt8521_read_page,
.write_page = yt8521_write_page,
.get_wol = ytphy_get_wol,
@@ -1795,7 +2193,7 @@ static struct phy_driver motorcomm_phy_drvs[] = {
module_phy_driver(motorcomm_phy_drvs);
-MODULE_DESCRIPTION("Motorcomm 8511/8521/8531S PHY driver");
+MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S PHY driver");
MODULE_AUTHOR("Peter Geis");
MODULE_AUTHOR("Frank");
MODULE_LICENSE("GPL");
@@ -1803,8 +2201,9 @@ MODULE_LICENSE("GPL");
static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8511) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8521) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_YT8531) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8531S) },
- { /* sentinal */ }
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(mdio, motorcomm_tbl);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9ba8f973f26f..a3917c7acbd3 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1538,7 +1538,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* another mac interface, so we should create a device link between
* phy dev and mac dev.
*/
- if (phydev->mdio.bus->parent && dev->dev.parent != phydev->mdio.bus->parent)
+ if (dev && phydev->mdio.bus->parent && dev->dev.parent != phydev->mdio.bus->parent)
phydev->devlink = device_link_add(dev->dev.parent, &phydev->mdio.dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 319790221d7f..ea8fcce5b2d9 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1816,10 +1816,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
- if (ret) {
- phy_device_free(phy_dev);
+ phy_device_free(phy_dev);
+ if (ret)
return ret;
- }
ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index a2be1994b389..8941aa199ea3 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -533,7 +533,7 @@ static int tap_open(struct inode *inode, struct file *file)
q->sock.state = SS_CONNECTED;
q->sock.file = file;
q->sock.ops = &tap_socket_ops;
- sock_init_data(&q->sock, &q->sk);
+ sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
q->sk.sk_write_space = tap_sock_write_space;
q->sk.sk_destruct = tap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 36620afde373..ad653b32b2f0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -3453,7 +3453,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
- sock_init_data(&tfile->socket, &tfile->sk);
+ sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
tfile->sk.sk_write_space = tun_sock_write_space;
tfile->sk.sk_sndbuf = INT_MAX;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 2c82fbcaab22..7a2b0094de51 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -57,9 +57,7 @@
static inline int
pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
{
- return usbnet_read_cmd(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR |
- USB_RECIP_DEVICE,
+ return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
val, index, NULL, 0);
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ddc3dc7ea73c..fb5e68ed3ec2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -723,7 +723,7 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
* have enough headroom.
*/
static struct page *xdp_linearize_page(struct receive_queue *rq,
- u16 *num_buf,
+ int *num_buf,
struct page *p,
int offset,
int page_off,
@@ -823,7 +823,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
int offset = buf - page_address(page) + header_offset;
unsigned int tlen = len + vi->hdr_len;
- u16 num_buf = 1;
+ int num_buf = 1;
xdp_headroom = virtnet_get_headroom(vi);
header_offset = VIRTNET_RX_PAD + xdp_headroom;
@@ -996,7 +996,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
void *buf,
unsigned int len,
unsigned int frame_sz,
- u16 *num_buf,
+ int *num_buf,
unsigned int *xdp_frags_truesize,
struct virtnet_rq_stats *stats)
{
@@ -1014,6 +1014,9 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
+ if (!*num_buf)
+ return 0;
+
if (*num_buf > 1) {
/* If we want to build multi-buffer xdp, we need
* to specify that the flags of xdp_buff have the
@@ -1027,10 +1030,10 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
shinfo->xdp_frags_size = 0;
}
- if ((*num_buf - 1) > MAX_SKB_FRAGS)
+ if (*num_buf > MAX_SKB_FRAGS + 1)
return -EINVAL;
- while ((--*num_buf) >= 1) {
+ while (--*num_buf > 0) {
buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
@@ -1083,7 +1086,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
- u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
+ int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
struct sk_buff *head_skb, *curr_skb;
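The u16 -> int change matters because the loop hunk above decrements the counter: with an unsigned 16-bit counter, a malformed num_buffers of zero would wrap instead of going negative. A minimal sketch of the hazard (illustrative only):

static void demo_u16_wrap(void)
{
	u16 n = 0;

	--n;	/* n is now 65535, so a "while (--n >= 1)" guard keeps spinning */
}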
@@ -2279,9 +2282,9 @@ static int virtnet_close(struct net_device *dev)
cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++) {
- xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
- napi_disable(&vi->rq[i].napi);
virtnet_napi_tx_disable(&vi->sq[i].napi);
+ napi_disable(&vi->rq[i].napi);
+ xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
}
return 0;
@@ -3816,6 +3819,12 @@ static int virtnet_validate(struct virtio_device *vdev)
__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
}
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
+ !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
+		dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby\n");
+ __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
+ }
+
return 0;
}
@@ -3929,6 +3938,8 @@ static int virtnet_probe(struct virtio_device *vdev)
eth_hw_addr_set(dev, addr);
} else {
eth_hw_addr_random(dev);
+ dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
+ dev->dev_addr);
}
/* Set up our device-specific information */
@@ -4058,6 +4069,24 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+	/* A random MAC address has been assigned; notify the device.
+	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
+	 * because many devices work fine without getting the MAC explicitly.
+	 */
+ if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ struct scatterlist sg;
+
+ sg_init_one(&sg, dev->dev_addr, dev->addr_len);
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
+ pr_debug("virtio_net: setting MAC address failed\n");
+ rtnl_unlock();
+ err = -EINVAL;
+ goto free_unregister_netdev;
+ }
+ }
+
rtnl_unlock();
err = virtnet_cpu_notif_add(vi);
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
index 7eff3531b9a5..7ff33c1d6ac7 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
@@ -152,6 +152,15 @@ static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
}
t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data)
+{
+ struct dpmaif_isr_para *isr_para = data;
+ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
+
t7xx_dpmaif_irq_cb(isr_para);
t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
return IRQ_HANDLED;
@@ -188,7 +197,7 @@ static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
- t7xx_dev->intr_thread[int_type] = NULL;
+ t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread;
t7xx_dev->callback_param[int_type] = isr_para;
t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
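For context, a generic sketch of the hard-handler/threaded-handler split this hunk adopts (demo names, not the t7xx code): the hard handler only quiesces the interrupt source, returning IRQ_WAKE_THREAD so the expensive callback work runs in the IRQ thread instead.

static irqreturn_t demo_hardirq(int irq, void *data)
{
	/* cheap and non-blocking: ack/mask the device interrupt */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_threadfn(int irq, void *data)
{
	/* runs in process context and may sleep */
	return IRQ_HANDLED;
}

/* registered with:
 * request_threaded_irq(irq, demo_hardirq, demo_threadfn, 0, "demo", dev);
 */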
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index aa2174a10437..f4ff2198b5ef 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -840,14 +840,13 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
if (!rxq->que_started) {
atomic_set(&rxq->rx_processing, 0);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
return work_done;
}
- if (!rxq->sleep_lock_pending) {
- pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev);
+ if (!rxq->sleep_lock_pending)
t7xx_pci_disable_sleep(t7xx_dev);
- }
ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
if (!ret) {
@@ -876,22 +875,22 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
napi_complete_done(napi, work_done);
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
+ atomic_set(&rxq->rx_processing, 0);
} else {
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
}
- t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
- pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
- pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev);
- atomic_set(&rxq->rx_processing, 0);
-
return work_done;
}
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
{
struct dpmaif_rx_queue *rxq;
- int qno;
+ struct dpmaif_ctrl *ctrl;
+ int qno, ret;
qno = ffs(que_mask) - 1;
if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
@@ -900,6 +899,18 @@ void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int
}
rxq = &dpmaif_ctrl->rxq[qno];
+ ctrl = rxq->dpmaif_ctrl;
+	/* We need to make sure that the modem has been resumed before
+	 * scheduling NAPI. This can't be done inside the polling function
+	 * as we could block waiting for the device to resume, which is
+	 * not allowed in the softirq context the poll function runs in.
+	 */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
+ return;
+ }
napi_schedule(&rxq->napi);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index 494a28e386a3..3ef4a8a4f8fd 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
@@ -45,12 +46,25 @@
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
- int i;
+ struct dpmaif_ctrl *ctrl;
+ int i, ret;
+
+ ctrl = ctlb->hif_ctrl;
if (ctlb->is_napi_en)
return;
for (i = 0; i < RXQ_NUM; i++) {
+ /* The usage count has to be bumped every time before calling
+		 * napi_schedule. It will be decreased in the poll routine,
+ * right after napi_complete_done is called.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n",
+ ret);
+ return;
+ }
napi_enable(ctlb->napi[i]);
napi_schedule(ctlb->napi[i]);
}
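Taken together with the rx-poll hunk above, the runtime-PM reference flow pairs up as in this hedged sketch (hypothetical names; the pm_runtime and NAPI calls are the real APIs):

static void demo_rx_kick(struct device *dev, struct napi_struct *napi)
{
	if (pm_runtime_resume_and_get(dev) < 0)
		return;			/* no reference was taken */
	napi_schedule(napi);		/* the reference travels with the poll */
}

static int demo_rx_poll_done(struct device *dev, struct napi_struct *napi,
			     int work_done)
{
	napi_complete_done(napi, work_done);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* drop the kick-side reference */
	return work_done;
}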
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 871f2a27a398..226fc1703e90 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -121,6 +121,8 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+ pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
+ pm_runtime_allow(&t7xx_dev->pdev->dev);
pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
}