-rw-r--r-- Documentation/bpf/prog_flow_dissector.rst | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/mfd/syscon.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml | 20
-rw-r--r-- Documentation/devicetree/bindings/net/fsl,fec.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/net/loongson,ls1b-gmac.yaml | 114
-rw-r--r-- Documentation/devicetree/bindings/net/loongson,ls1c-emac.yaml | 113
-rw-r--r-- Documentation/devicetree/bindings/net/snps,dwmac.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/soc/mediatek/mediatek,mt7986-wo-ccif.yaml | 1
-rw-r--r-- Documentation/driver-api/dpll.rst | 500
-rw-r--r-- Documentation/driver-api/index.rst | 1
-rw-r--r-- Documentation/netlink/specs/dpll.yaml | 488
-rw-r--r-- Documentation/netlink/specs/handshake.yaml | 8
-rw-r--r-- Documentation/netlink/specs/netdev.yaml | 21
-rw-r--r-- Documentation/networking/device_drivers/appletalk/cops.rst | 80
-rw-r--r-- Documentation/networking/device_drivers/appletalk/index.rst | 18
-rw-r--r-- Documentation/networking/device_drivers/ethernet/index.rst | 1
-rw-r--r-- Documentation/networking/device_drivers/ethernet/intel/idpf.rst | 160
-rw-r--r-- Documentation/networking/device_drivers/index.rst | 1
-rw-r--r-- Documentation/networking/ip-sysctl.rst | 18
-rw-r--r-- Documentation/networking/pktgen.rst | 12
-rw-r--r-- Documentation/networking/xdp-rx-metadata.rst | 7
-rw-r--r-- MAINTAINERS | 26
-rw-r--r-- arch/arm/net/bpf_jit_32.c | 280
-rw-r--r-- arch/arm/net/bpf_jit_32.h | 4
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 2
-rw-r--r-- arch/s390/net/bpf_jit_comp.c | 2
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 148
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/dpll/Kconfig | 7
-rw-r--r-- drivers/dpll/Makefile | 9
-rw-r--r-- drivers/dpll/dpll_core.c | 798
-rw-r--r-- drivers/dpll/dpll_core.h | 89
-rw-r--r-- drivers/dpll/dpll_netlink.c | 1253
-rw-r--r-- drivers/dpll/dpll_netlink.h | 13
-rw-r--r-- drivers/dpll/dpll_nl.c | 162
-rw-r--r-- drivers/dpll/dpll_nl.h | 51
-rw-r--r-- drivers/net/Space.c | 6
-rw-r--r-- drivers/net/appletalk/Kconfig | 30
-rw-r--r-- drivers/net/appletalk/Makefile | 1
-rw-r--r-- drivers/net/appletalk/cops.c | 1005
-rw-r--r-- drivers/net/appletalk/cops.h | 61
-rw-r--r-- drivers/net/appletalk/cops_ffdrv.h | 532
-rw-r--r-- drivers/net/appletalk/cops_ltdrv.h | 241
-rw-r--r-- drivers/net/dsa/b53/b53_mmap.c | 6
-rw-r--r-- drivers/net/dsa/b53/b53_srab.c | 8
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 8
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek.c | 8
-rw-r--r-- drivers/net/dsa/lantiq_gswip.c | 8
-rw-r--r-- drivers/net/dsa/microchip/Makefile | 2
-rw-r--r-- drivers/net/dsa/microchip/ksz8795_reg.h | 21
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.c | 79
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.h | 38
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_acl.c | 1436
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_reg.h | 20
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_tc_flower.c | 281
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 521
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.h | 31
-rw-r--r-- drivers/net/dsa/mt7530-mmio.c | 7
-rw-r--r-- drivers/net/dsa/mt7530.c | 11
-rw-r--r-- drivers/net/dsa/mv88e6xxx/pcs-639x.c | 2
-rw-r--r-- drivers/net/dsa/ocelot/ocelot_ext.c | 8
-rw-r--r-- drivers/net/dsa/ocelot/seville_vsc9953.c | 8
-rw-r--r-- drivers/net/dsa/realtek/realtek-smi.c | 8
-rw-r--r-- drivers/net/dsa/realtek/rtl8366rb.c | 44
-rw-r--r-- drivers/net/dsa/rzn1_a5psw.c | 8
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_clocking.c | 21
-rw-r--r-- drivers/net/dsa/vitesse-vsc73xx-platform.c | 8
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x.c | 18
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 6
-rw-r--r-- drivers/net/ethernet/8390/mcf8390.c | 5
-rw-r--r-- drivers/net/ethernet/8390/ne.c | 5
-rw-r--r-- drivers/net/ethernet/actions/owl-emac.c | 6
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 6
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.c | 5
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_main.c | 6
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 2
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 6
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.c | 43
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.h | 7
-rw-r--r-- drivers/net/ethernet/amd/pds_core/dev.c | 11
-rw-r--r-- drivers/net/ethernet/amd/pds_core/main.c | 50
-rw-r--r-- drivers/net/ethernet/amd/sunlance.c | 6
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 6
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 6
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 6
-rw-r--r-- drivers/net/ethernet/apple/macmace.c | 6
-rw-r--r-- drivers/net/ethernet/arc/emac_arc.c | 6
-rw-r--r-- drivers/net/ethernet/arc/emac_rockchip.c | 5
-rw-r--r-- drivers/net/ethernet/atheros/ag71xx.c | 8
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c.h | 3
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 80
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bcm4908_enet.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/Makefile | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 150
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 545
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c | 241
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h | 30
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/sb1250-mac.c | 6
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 6
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 6
-rw-r--r-- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/l2t.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sched.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/smt.h | 2
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 5
-rw-r--r-- drivers/net/ethernet/cirrus/ep93xx_eth.c | 8
-rw-r--r-- drivers/net/ethernet/cirrus/mac89x0.c | 5
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 12
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 6
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip.h | 2
-rw-r--r-- drivers/net/ethernet/dnet.c | 6
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_main.c | 6
-rw-r--r-- drivers/net/ethernet/ethoc.c | 6
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 5
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_eth.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hisi_femac.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 5
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 8
-rw-r--r-- drivers/net/ethernet/i825xx/sni_82596.c | 5
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 8
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/emac/rgmii.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/emac/tah.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/emac/zmii.c | 6
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 13
-rw-r--r-- drivers/net/ethernet/intel/Makefile | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 28
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 18
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 102
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 46
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 79
-rw-r--r-- drivers/net/ethernet/intel/ice/Makefile | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 20
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 258
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 662
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 51
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ddp.c | 39
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dpll.c | 1904
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dpll.h | 108
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lag.c | 48
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 41
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 19
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.c | 675
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.h | 41
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 758
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 96
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.c | 56
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 63
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 23
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 69
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.c | 22
-rw-r--r-- drivers/net/ethernet/intel/idpf/Makefile | 18
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf.h | 968
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_controlq.c | 621
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_controlq.h | 130
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_controlq_api.h | 169
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c | 171
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_dev.c | 165
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_devids.h | 10
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 1369
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h | 124
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h | 293
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h | 128
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lib.c | 2379
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_main.c | 279
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_mem.h | 20
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 1183
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 4289
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.h | 1023
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_vf_dev.c | 163
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 3791
-rw-r--r-- drivers/net/ethernet/intel/idpf/virtchnl2.h | 1273
-rw-r--r-- drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h | 451
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 2
-rw-r--r-- drivers/net/ethernet/korina.c | 6
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 6
-rw-r--r-- drivers/net/ethernet/lantiq_xrx200.c | 6
-rw-r--r-- drivers/net/ethernet/litex/litex_liteeth.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 8
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 168
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_config.h | 22
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c | 24
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h | 18
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 213
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 13
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h | 4
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/npc.h | 8
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 86
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 53
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 62
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 31
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 58
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 5
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 9
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe.c | 4
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe.h | 19
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 10
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed.c | 1432
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed.h | 57
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed_debugfs.c | 400
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 98
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed_regs.h | 369
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed_wo.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dpll.c | 432
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 96
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 242
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 2
-rw-r--r-- drivers/net/ethernet/micrel/ks8842.c | 5
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_par.c | 6
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c | 6
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 6
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.c | 6
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c | 2
-rw-r--r-- drivers/net/ethernet/moxa/moxart_ether.c | 6
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 6
-rw-r--r-- drivers/net/ethernet/natsemi/jazzsonic.c | 6
-rw-r--r-- drivers/net/ethernet/natsemi/macsonic.c | 6
-rw-r--r-- drivers/net/ethernet/natsemi/xtsonic.c | 6
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfd3/xsk.c | 2
-rw-r--r-- drivers/net/ethernet/ni/nixge.c | 6
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 6
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.h | 3
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 12
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 77
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 6
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 6
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.c | 6
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 6
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 6
-rw-r--r-- drivers/net/ethernet/seeq/sgiseeq.c | 6
-rw-r--r-- drivers/net/ethernet/sfc/efx_channels.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/ptp.c | 27
-rw-r--r-- drivers/net/ethernet/sfc/siena/efx_channels.c | 2
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 6
-rw-r--r-- drivers/net/ethernet/sgi/meth.c | 6
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 6
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 6
-rw-r--r-- drivers/net/ethernet/socionext/netsec.c | 8
-rw-r--r-- drivers/net/ethernet/socionext/sni_ave.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 13
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c | 33
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 25
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 27
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c | 209
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 19
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 25
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 53
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 17
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 23
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c | 18
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 73
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h | 5
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 5
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.c | 6
-rw-r--r-- drivers/net/ethernet/sun/sunqe.c | 6
-rw-r--r-- drivers/net/ethernet/sunplus/spl2sw_driver.c | 6
-rw-r--r-- drivers/net/ethernet/ti/cpmac.c | 6
-rw-r--r-- drivers/net/ethernet/ti/cpsw_priv.c | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 6
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 6
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_config.c | 14
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 22
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.h | 2
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 5
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net.c | 2
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 6
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 6
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 6
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 92
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.h | 7
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_type.h | 1
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 119
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 3
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 56
-rw-r--r-- drivers/net/ethernet/wiznet/w5100.c | 6
-rw-r--r-- drivers/net/ethernet/wiznet/w5300.c | 5
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 5
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 8
-rw-r--r-- drivers/net/ethernet/xscale/ixp4xx_eth.c | 13
-rw-r--r-- drivers/net/gtp.c | 4
-rw-r--r-- drivers/net/hamradio/Kconfig | 15
-rw-r--r-- drivers/net/ipa/ipa_power.c | 2
-rw-r--r-- drivers/net/macsec.c | 6
-rw-r--r-- drivers/net/mdio/mdio-aspeed.c | 6
-rw-r--r-- drivers/net/mdio/mdio-bcm-iproc.c | 6
-rw-r--r-- drivers/net/mdio/mdio-bcm-unimac.c | 6
-rw-r--r-- drivers/net/mdio/mdio-gpio.c | 6
-rw-r--r-- drivers/net/mdio/mdio-hisi-femac.c | 6
-rw-r--r-- drivers/net/mdio/mdio-ipq4019.c | 6
-rw-r--r-- drivers/net/mdio/mdio-ipq8064.c | 7
-rw-r--r-- drivers/net/mdio/mdio-moxart.c | 6
-rw-r--r-- drivers/net/mdio/mdio-mscc-miim.c | 6
-rw-r--r-- drivers/net/mdio/mdio-mux-bcm-iproc.c | 6
-rw-r--r-- drivers/net/mdio/mdio-mux-bcm6368.c | 6
-rw-r--r-- drivers/net/mdio/mdio-mux-gpio.c | 5
-rw-r--r-- drivers/net/mdio/mdio-mux-meson-g12a.c | 6
-rw-r--r-- drivers/net/mdio/mdio-mux-meson-gxl.c | 6
-rw-r--r-- drivers/net/mdio/mdio-mux-mmioreg.c | 6
-rw-r--r-- drivers/net/mdio/mdio-mux-multiplexer.c | 6
-rw-r--r-- drivers/net/mdio/mdio-octeon.c | 5
-rw-r--r-- drivers/net/mdio/mdio-sun4i.c | 6
-rw-r--r-- drivers/net/mdio/mdio-xgene.c | 6
-rw-r--r-- drivers/net/phy/Kconfig | 4
-rw-r--r-- drivers/net/phy/amd.c | 33
-rw-r--r-- drivers/net/phy/ax88796b.c | 2
-rw-r--r-- drivers/net/phy/phy.c | 207
-rw-r--r-- drivers/net/phy/sfp.c | 8
-rw-r--r-- drivers/net/ppp/pppoe.c | 2
-rw-r--r-- drivers/net/usb/r8152.c | 85
-rw-r--r-- drivers/net/virtio_net.c | 30
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 2
-rw-r--r-- drivers/net/wwan/wwan_core.c | 5
-rw-r--r-- drivers/ptp/Kconfig | 1
-rw-r--r-- drivers/ptp/ptp_ocp.c | 369
-rw-r--r-- drivers/vhost/vsock.c | 14
-rw-r--r-- include/linux/avf/virtchnl.h | 11
-rw-r--r-- include/linux/bpf.h | 40
-rw-r--r-- include/linux/bpf_verifier.h | 9
-rw-r--r-- include/linux/ceph/mon_client.h | 2
-rw-r--r-- include/linux/compiler_types.h | 32
-rw-r--r-- include/linux/dpll.h | 152
-rw-r--r-- include/linux/filter.h | 64
-rw-r--r-- include/linux/fortify-string.h | 4
-rw-r--r-- include/linux/igmp.h | 2
-rw-r--r-- include/linux/ipv6.h | 50
-rw-r--r-- include/linux/kasan.h | 2
-rw-r--r-- include/linux/mlx5/device.h | 1
-rw-r--r-- include/linux/mlx5/driver.h | 2
-rw-r--r-- include/linux/mlx5/fs.h | 1
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 60
-rw-r--r-- include/linux/netdevice.h | 23
-rw-r--r-- include/linux/overflow.h | 35
-rw-r--r-- include/linux/pds/pds_core_if.h | 1
-rw-r--r-- include/linux/phy.h | 1
-rw-r--r-- include/linux/soc/mediatek/mtk_wed.h | 76
-rw-r--r-- include/linux/stmmac.h | 1
-rw-r--r-- include/linux/tcp.h | 22
-rw-r--r-- include/linux/udp.h | 66
-rw-r--r-- include/linux/virtio_vsock.h | 10
-rw-r--r-- include/net/Space.h | 1
-rw-r--r-- include/net/bluetooth/bluetooth.h | 2
-rw-r--r-- include/net/devlink.h | 9
-rw-r--r-- include/net/dsa.h | 13
-rw-r--r-- include/net/dst.h | 4
-rw-r--r-- include/net/if_inet6.h | 2
-rw-r--r-- include/net/inet_sock.h | 10
-rw-r--r-- include/net/ip.h | 16
-rw-r--r-- include/net/ip6_route.h | 19
-rw-r--r-- include/net/ip_fib.h | 2
-rw-r--r-- include/net/ipv6.h | 36
-rw-r--r-- include/net/mana/hw_channel.h | 2
-rw-r--r-- include/net/mana/mana.h | 2
-rw-r--r-- include/net/netns/ipv4.h | 1
-rw-r--r-- include/net/pkt_sched.h | 8
-rw-r--r-- include/net/route.h | 4
-rw-r--r-- include/net/sch_generic.h | 1
-rw-r--r-- include/net/sock.h | 41
-rw-r--r-- include/net/tcp.h | 6
-rw-r--r-- include/net/udp_tunnel.h | 14
-rw-r--r-- include/net/udplite.h | 14
-rw-r--r-- include/net/xdp.h | 19
-rw-r--r-- include/net/xdp_sock.h | 2
-rw-r--r-- include/net/xfrm.h | 2
-rw-r--r-- include/trace/events/mptcp.h | 2
-rw-r--r-- include/trace/events/vsock_virtio_transport_common.h | 12
-rw-r--r-- include/uapi/linux/bpf.h | 9
-rw-r--r-- include/uapi/linux/devlink.h | 1
-rw-r--r-- include/uapi/linux/dpll.h | 201
-rw-r--r-- include/uapi/linux/if_link.h | 2
-rw-r--r-- include/uapi/linux/netdev.h | 16
-rw-r--r-- include/uapi/linux/pkt_sched.h | 15
-rw-r--r-- include/uapi/linux/tcp.h | 12
-rw-r--r-- kernel/bpf/bpf_struct_ops.c | 26
-rw-r--r-- kernel/bpf/btf.c | 34
-rw-r--r-- kernel/bpf/core.c | 37
-rw-r--r-- kernel/bpf/helpers.c | 61
-rw-r--r-- kernel/bpf/memalloc.c | 30
-rw-r--r-- kernel/bpf/offload.c | 18
-rw-r--r-- kernel/bpf/syscall.c | 6
-rw-r--r-- kernel/bpf/task_iter.c | 40
-rw-r--r-- kernel/bpf/trampoline.c | 4
-rw-r--r-- kernel/bpf/verifier.c | 643
-rw-r--r-- lib/test_bpf.c | 371
-rw-r--r-- mm/kasan/kasan.h | 1
-rw-r--r-- net/appletalk/aarp.c | 2
-rw-r--r-- net/ax25/af_ax25.c | 2
-rw-r--r-- net/bluetooth/l2cap_sock.c | 2
-rw-r--r-- net/can/j1939/socket.c | 2
-rw-r--r-- net/can/raw.c | 2
-rw-r--r-- net/ceph/mon_client.c | 2
-rw-r--r-- net/core/dev.c | 22
-rw-r--r-- net/core/dst.c | 10
-rw-r--r-- net/core/netdev-genl.c | 12
-rw-r--r-- net/core/pktgen.c | 102
-rw-r--r-- net/core/rtnetlink.c | 36
-rw-r--r-- net/core/skbuff.c | 19
-rw-r--r-- net/core/sock.c | 178
-rw-r--r-- net/core/xdp.c | 4
-rw-r--r-- net/dccp/ipv4.c | 2
-rw-r--r-- net/dccp/ipv6.c | 10
-rw-r--r-- net/devlink/core.c | 217
-rw-r--r-- net/devlink/dev.c | 50
-rw-r--r-- net/devlink/devl_internal.h | 34
-rw-r--r-- net/devlink/linecard.c | 80
-rw-r--r-- net/devlink/netlink.c | 26
-rw-r--r-- net/devlink/port.c | 55
-rw-r--r-- net/dsa/port.c | 5
-rw-r--r-- net/dsa/port.h | 3
-rw-r--r-- net/dsa/slave.c | 9
-rw-r--r-- net/dsa/tag_ksz.c | 8
-rw-r--r-- net/handshake/genl.c | 2
-rw-r--r-- net/handshake/netlink.c | 2
-rw-r--r-- net/handshake/tlshd.c | 6
-rw-r--r-- net/ipv4/datagram.c | 6
-rw-r--r-- net/ipv4/igmp.c | 2
-rw-r--r-- net/ipv4/inet_diag.c | 4
-rw-r--r-- net/ipv4/ip_output.c | 15
-rw-r--r-- net/ipv4/ip_sockglue.c | 192
-rw-r--r-- net/ipv4/ping.c | 13
-rw-r--r-- net/ipv4/raw.c | 19
-rw-r--r-- net/ipv4/route.c | 6
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 9
-rw-r--r-- net/ipv4/tcp.c | 12
-rw-r--r-- net/ipv4/tcp_bbr.c | 13
-rw-r--r-- net/ipv4/tcp_input.c | 36
-rw-r--r-- net/ipv4/tcp_ipv4.c | 12
-rw-r--r-- net/ipv4/tcp_metrics.c | 22
-rw-r--r-- net/ipv4/tcp_minisocks.c | 6
-rw-r--r-- net/ipv4/tcp_output.c | 42
-rw-r--r-- net/ipv4/tcp_timer.c | 19
-rw-r--r-- net/ipv4/udp.c | 92
-rw-r--r-- net/ipv4/udp_offload.c | 4
-rw-r--r-- net/ipv4/udp_tunnel_core.c | 2
-rw-r--r-- net/ipv4/udp_tunnel_nic.c | 11
-rw-r--r-- net/ipv4/udplite.c | 1
-rw-r--r-- net/ipv4/xfrm4_input.c | 4
-rw-r--r-- net/ipv6/addrconf.c | 38
-rw-r--r-- net/ipv6/af_inet6.c | 9
-rw-r--r-- net/ipv6/datagram.c | 15
-rw-r--r-- net/ipv6/icmp.c | 4
-rw-r--r-- net/ipv6/inet6_connection_sock.c | 2
-rw-r--r-- net/ipv6/ip6_flowlabel.c | 8
-rw-r--r-- net/ipv6/ip6_output.c | 46
-rw-r--r-- net/ipv6/ip6_udp_tunnel.c | 5
-rw-r--r-- net/ipv6/ipv6_sockglue.c | 242
-rw-r--r-- net/ipv6/mcast.c | 6
-rw-r--r-- net/ipv6/ndisc.c | 4
-rw-r--r-- net/ipv6/ping.c | 4
-rw-r--r-- net/ipv6/raw.c | 16
-rw-r--r-- net/ipv6/route.c | 6
-rw-r--r-- net/ipv6/tcp_ipv6.c | 25
-rw-r--r-- net/ipv6/udp.c | 46
-rw-r--r-- net/ipv6/udplite.c | 1
-rw-r--r-- net/ipv6/xfrm6_input.c | 4
-rw-r--r-- net/l2tp/l2tp_core.c | 6
-rw-r--r-- net/l2tp/l2tp_eth.c | 34
-rw-r--r-- net/l2tp/l2tp_ip6.c | 6
-rw-r--r-- net/mac80211/tdls.c | 2
-rw-r--r-- net/mptcp/sockopt.c | 10
-rw-r--r-- net/netfilter/ipvs/ip_vs_sync.c | 16
-rw-r--r-- net/netfilter/nf_nat_proto.c | 64
-rw-r--r-- net/netfilter/nf_tables_api.c | 43
-rw-r--r-- net/netrom/af_netrom.c | 2
-rw-r--r-- net/openvswitch/actions.c | 27
-rw-r--r-- net/openvswitch/meter.h | 4
-rw-r--r-- net/rose/af_rose.c | 2
-rw-r--r-- net/sched/cls_route.c | 37
-rw-r--r-- net/sched/em_meta.c | 2
-rw-r--r-- net/sched/sch_fq.c | 385
-rw-r--r-- net/sched/sch_frag.c | 4
-rw-r--r-- net/sched/sch_generic.c | 9
-rw-r--r-- net/sctp/ipv6.c | 9
-rw-r--r-- net/sctp/protocol.c | 4
-rw-r--r-- net/sctp/sm_make_chunk.c | 2
-rw-r--r-- net/smc/af_smc.c | 2
-rw-r--r-- net/tipc/link.c | 4
-rw-r--r-- net/tls/tls_sw.c | 2
-rw-r--r-- net/vmw_vsock/af_vsock.c | 3
-rw-r--r-- net/vmw_vsock/virtio_transport.c | 92
-rw-r--r-- net/vmw_vsock/virtio_transport_common.c | 307
-rw-r--r-- net/x25/af_x25.c | 2
-rw-r--r-- net/xdp/xsk.c | 4
-rw-r--r-- net/xdp/xsk_buff_pool.c | 3
-rw-r--r-- net/xfrm/xfrm_policy.c | 2
-rw-r--r-- samples/bpf/Makefile | 4
-rw-r--r-- tools/bpf/bpftool/gen.c | 2
-rw-r--r-- tools/include/uapi/linux/bpf.h | 9
-rw-r--r-- tools/include/uapi/linux/netdev.h | 16
-rw-r--r-- tools/lib/bpf/bpf_helpers.h | 1
-rw-r--r-- tools/lib/bpf/btf.c | 160
-rw-r--r-- tools/lib/bpf/libbpf.c | 166
-rw-r--r-- tools/net/ynl/Makefile | 1
-rw-r--r-- tools/net/ynl/generated/Makefile | 2
-rw-r--r-- tools/net/ynl/generated/handshake-user.h | 10
-rw-r--r-- tools/net/ynl/generated/netdev-user.c | 19
-rw-r--r-- tools/net/ynl/generated/netdev-user.h | 3
-rw-r--r-- tools/net/ynl/samples/Makefile | 3
-rw-r--r-- tools/net/ynl/samples/netdev.c | 8
-rw-r--r-- tools/testing/selftests/bpf/DENYLIST.aarch64 | 1
-rw-r--r-- tools/testing/selftests/bpf/DENYLIST.s390x | 1
-rw-r--r-- tools/testing/selftests/bpf/bpf_experimental.h | 319
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/btf.c | 4
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/exceptions.c | 409
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/fill_link_info.c | 2
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c | 20
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/libbpf_str.c | 6
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/linked_list.c | 4
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c | 5
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/percpu_alloc.c | 125
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c | 89
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/tailcalls.c | 269
-rw-r--r-- tools/testing/selftests/bpf/progs/exceptions.c | 368
-rw-r--r-- tools/testing/selftests/bpf/progs/exceptions_assert.c | 135
-rw-r--r-- tools/testing/selftests/bpf/progs/exceptions_ext.c | 72
-rw-r--r-- tools/testing/selftests/bpf/progs/exceptions_fail.c | 347
-rw-r--r-- tools/testing/selftests/bpf/progs/percpu_alloc_array.c | 183
-rw-r--r-- tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c | 105
-rw-r--r-- tools/testing/selftests/bpf/progs/percpu_alloc_fail.c | 164
-rw-r--r-- tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c | 106
-rw-r--r-- tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c | 18
-rw-r--r-- tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c | 18
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_bswap.c | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_gotol.c | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_ldsx.c | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_movsx.c | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_sdiv.c | 3
-rwxr-xr-x tools/testing/selftests/bpf/test_bpftool_synctypes.py | 9
-rwxr-xr-x tools/testing/selftests/bpf/test_xsk.sh | 40
-rw-r--r-- tools/testing/selftests/bpf/trace_helpers.c | 134
-rw-r--r-- tools/testing/selftests/bpf/trace_helpers.h | 8
-rwxr-xr-x tools/testing/selftests/bpf/xsk_prereqs.sh | 10
-rw-r--r-- tools/testing/selftests/bpf/xskxceiver.c | 535
-rw-r--r-- tools/testing/selftests/bpf/xskxceiver.h | 44
-rwxr-xr-x tools/testing/selftests/net/mptcp/mptcp_connect.sh | 2
-rwxr-xr-x tools/testing/selftests/net/rtnetlink.sh | 981
-rwxr-xr-x tools/testing/selftests/netfilter/nf_nat_edemux.sh | 46
-rw-r--r-- tools/testing/selftests/tc-testing/README | 65
-rw-r--r-- tools/testing/selftests/tc-testing/TdcPlugin.py | 4
-rw-r--r-- tools/testing/selftests/tc-testing/TdcResults.py | 3
-rw-r--r-- tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py | 194
-rw-r--r-- tools/testing/selftests/tc-testing/plugin-lib/rootPlugin.py | 4
-rw-r--r-- tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py | 5
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json | 45
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/csum.json | 69
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/ct.json | 54
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/ctinfo.json | 36
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/gact.json | 75
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/gate.json | 36
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/ife.json | 144
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json | 72
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json | 159
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/nat.json | 81
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json | 198
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/police.json | 102
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/sample.json | 87
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/simple.json | 27
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json | 90
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json | 54
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json | 117
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json | 108
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/xt.json | 24
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json | 10
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/filters/fw.json | 315
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json | 141
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/filters/route.json | 25
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/filters/u32.json | 25
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/infra/actions.json | 144
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/infra/filter.json | 9
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/cake.json | 82
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbs.json | 38
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/choke.json | 30
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/codel.json | 34
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json | 10
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/etf.json | 18
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/ets.json | 284
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/fifo.json | 98
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json | 68
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_codel.json | 54
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json | 5
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/gred.json | 28
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/hfsc.json | 26
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/hhf.json | 36
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/htb.json | 46
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/ingress.json | 36
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json | 62
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/pfifo_fast.json | 18
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/plug.json | 30
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/prio.json | 85
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json | 39
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json | 34
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json | 48
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json | 40
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/skbprio.json | 16
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/tbf.json | 36
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json | 34
-rwxr-xr-x tools/testing/selftests/tc-testing/tdc.py | 250
-rw-r--r-- tools/testing/vsock/util.c | 124
-rw-r--r-- tools/testing/vsock/util.h | 3
-rw-r--r-- tools/testing/vsock/vsock_test.c | 325
687 files changed, 47345 insertions, 8962 deletions
diff --git a/Documentation/bpf/prog_flow_dissector.rst b/Documentation/bpf/prog_flow_dissector.rst
index 4d86780ab0f1..f24270b8b034 100644
--- a/Documentation/bpf/prog_flow_dissector.rst
+++ b/Documentation/bpf/prog_flow_dissector.rst
@@ -113,7 +113,7 @@ Flags
used by ``eth_get_headlen`` to estimate length of all headers for GRO.
* ``BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL`` - tells BPF flow dissector to
stop parsing as soon as it reaches IPv6 flow label; used by
- ``___skb_get_hash`` and ``__skb_get_hash_symmetric`` to get flow hash.
+ ``___skb_get_hash`` to get flow hash.
* ``BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP`` - tells BPF flow dissector to stop
parsing as soon as it reaches encapsulated headers; used by routing
infrastructure.
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
index 28ded09d72e3..e7720caf31b3 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
@@ -22,6 +22,7 @@ properties:
- mediatek,mt7622-wed
- mediatek,mt7981-wed
- mediatek,mt7986-wed
+ - mediatek,mt7988-wed
- const: syscon
reg:
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
index 8103154bbb52..c77d7b155a4c 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.yaml
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -49,6 +49,8 @@ properties:
- hisilicon,peri-subctrl
- hpe,gxp-sysreg
- intel,lgm-syscon
+ - loongson,ls1b-syscon
+ - loongson,ls1c-syscon
- marvell,armada-3700-usb2-host-misc
- mediatek,mt8135-pctl-a-syscfg
- mediatek,mt8135-pctl-b-syscfg
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
index 03b5567be389..41014f5c01c4 100644
--- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -49,6 +49,26 @@ properties:
Set if the output SYNCLKO clock should be disabled. Do not mix with
microchip,synclko-125.
+ microchip,io-drive-strength-microamp:
+ description:
+ IO Pad Drive Strength
+ enum: [8000, 16000]
+ default: 16000
+
+ microchip,hi-drive-strength-microamp:
+ description:
+ High Speed Drive Strength. Controls drive strength of GMII / RGMII /
+ MII / RMII (except TX_CLK/REFCLKI, COL and CRS) and CLKO_25_125 lines.
+ enum: [2000, 4000, 8000, 12000, 16000, 20000, 24000, 28000]
+ default: 24000
+
+ microchip,lo-drive-strength-microamp:
+ description:
+ Low Speed Drive Strength. Controls drive strength of TX_CLK / REFCLKI,
+ COL, CRS, LEDs, PME_N, NTRP_N, SDO and SDI/SDA/MDIO lines.
+ enum: [2000, 4000, 8000, 12000, 16000, 20000, 24000, 28000]
+ default: 8000
+
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/net/fsl,fec.yaml b/Documentation/devicetree/bindings/net/fsl,fec.yaml
index b494e009326e..8948a11c994e 100644
--- a/Documentation/devicetree/bindings/net/fsl,fec.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -59,6 +59,7 @@ properties:
- const: fsl,imx6sx-fec
- items:
- enum:
+ - fsl,imx8dxl-fec
- fsl,imx8qxp-fec
- const: fsl,imx8qm-fec
- const: fsl,imx6sx-fec
diff --git a/Documentation/devicetree/bindings/net/loongson,ls1b-gmac.yaml b/Documentation/devicetree/bindings/net/loongson,ls1b-gmac.yaml
new file mode 100644
index 000000000000..c4f3224bad38
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/loongson,ls1b-gmac.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/loongson,ls1b-gmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson-1B Gigabit Ethernet MAC Controller
+
+maintainers:
+ - Keguang Zhang <keguang.zhang@gmail.com>
+
+description: |
+ Loongson-1B Gigabit Ethernet MAC Controller is based on
+ Synopsys DesignWare MAC (version 3.50a).
+
+ Main features
+ - Dual 10/100/1000Mbps GMAC controllers
+ - Full-duplex operation (IEEE 802.3x flow control automatic transmission)
+ - Half-duplex operation (CSMA/CD Protocol and back-pressure support)
+ - RX Checksum Offload
+ - TX Checksum insertion
+ - MII interface
+ - RGMII interface
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - loongson,ls1b-gmac
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - loongson,ls1b-gmac
+ - const: snps,dwmac-3.50a
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: stmmaceth
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: macirq
+
+ loongson,ls1-syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon containing some extra configurations
+ including PHY interface mode.
+
+ phy-mode:
+ enum:
+ - mii
+ - rgmii-id
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - loongson,ls1-syscon
+
+allOf:
+ - $ref: snps,dwmac.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/loongson,ls1x-clk.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ gmac0: ethernet@1fe10000 {
+ compatible = "loongson,ls1b-gmac", "snps,dwmac-3.50a";
+ reg = <0x1fe10000 0x10000>;
+
+ clocks = <&clkc LS1X_CLKID_AHB>;
+ clock-names = "stmmaceth";
+
+ interrupt-parent = <&intc1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+
+ loongson,ls1-syscon = <&syscon>;
+
+ phy-handle = <&phy0>;
+ phy-mode = "mii";
+ snps,pbl = <1>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy0: ethernet-phy@0 {
+ reg = <0x0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/loongson,ls1c-emac.yaml b/Documentation/devicetree/bindings/net/loongson,ls1c-emac.yaml
new file mode 100644
index 000000000000..99001b940b83
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/loongson,ls1c-emac.yaml
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/loongson,ls1c-emac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson-1C Ethernet MAC Controller
+
+maintainers:
+ - Keguang Zhang <keguang.zhang@gmail.com>
+
+description: |
+ Loongson-1C Ethernet MAC Controller is based on
+ Synopsys DesignWare MAC (version 3.50a).
+
+ Main features
+ - 10/100Mbps
+ - Full-duplex operation (IEEE 802.3x flow control automatic transmission)
+ - Half-duplex operation (CSMA/CD Protocol and back-pressure support)
+ - IEEE 802.1Q VLAN tag detection for reception frames
+ - MII interface
+ - RMII interface
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - loongson,ls1c-emac
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - loongson,ls1c-emac
+ - const: snps,dwmac-3.50a
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: stmmaceth
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: macirq
+
+ loongson,ls1-syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon containing some extra configurations
+ including PHY interface mode.
+
+ phy-mode:
+ enum:
+ - mii
+ - rmii
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - loongson,ls1-syscon
+
+allOf:
+ - $ref: snps,dwmac.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/loongson,ls1x-clk.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ emac: ethernet@1fe10000 {
+ compatible = "loongson,ls1c-emac", "snps,dwmac-3.50a";
+ reg = <0x1fe10000 0x10000>;
+
+ clocks = <&clkc LS1X_CLKID_AHB>;
+ clock-names = "stmmaceth";
+
+ interrupt-parent = <&intc1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+
+ loongson,ls1-syscon = <&syscon>;
+
+ phy-handle = <&phy0>;
+ phy-mode = "mii";
+ snps,pbl = <1>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy0: ethernet-phy@13 {
+ reg = <0x13>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index ddf9522a5dc2..5c2769dc689a 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -394,6 +394,11 @@ properties:
When a PFC frame is received with priorities matching the bitmask,
the queue is blocked from transmitting for the pause time specified
in the PFC frame.
+
+ snps,coe-unsupported:
+ type: boolean
+ description: TX checksum offload is unsupported by the TX queue.
+
allOf:
- if:
required:
diff --git a/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml b/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
index 311c570165f9..229c8f32019f 100644
--- a/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
+++ b/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
@@ -19,6 +19,7 @@ allOf:
properties:
compatible:
enum:
+ - ti,am642-icssg-prueth # for AM64x SoC family
- ti,am654-icssg-prueth # for AM65x SoC family
sram:
@@ -106,6 +107,13 @@ properties:
phandle to system controller node and register offset
to ICSSG control register for RGMII transmit delay
+ ti,half-duplex-capable:
+ type: boolean
+ description:
+ Indicates that the PHY output pin COL is routed to ICSSG GPIO pin
+ (PRGx_PRU0/1_GPIO10) as input so that the ICSSG MII port is
+ capable of half duplex operations.
+
required:
- reg
anyOf:
diff --git a/Documentation/devicetree/bindings/soc/mediatek/mediatek,mt7986-wo-ccif.yaml b/Documentation/devicetree/bindings/soc/mediatek/mediatek,mt7986-wo-ccif.yaml
index f0fa92b04b32..3b212f26abc5 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/mediatek,mt7986-wo-ccif.yaml
+++ b/Documentation/devicetree/bindings/soc/mediatek/mediatek,mt7986-wo-ccif.yaml
@@ -20,6 +20,7 @@ properties:
items:
- enum:
- mediatek,mt7986-wo-ccif
+ - mediatek,mt7988-wo-ccif
- const: syscon
reg:
diff --git a/Documentation/driver-api/dpll.rst b/Documentation/driver-api/dpll.rst
new file mode 100644
index 000000000000..69670deb8c4e
--- /dev/null
+++ b/Documentation/driver-api/dpll.rst
@@ -0,0 +1,500 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============================
+The Linux kernel dpll subsystem
+===============================
+
+DPLL
+====
+
+PLL - a Phase Locked Loop is an electronic circuit which syntonizes
+the clock signal of a device with an external clock signal,
+effectively enabling the device to run on the same clock signal beat
+as provided on the PLL input.
+
+DPLL - a Digital Phase Locked Loop is an integrated circuit which, in
+addition to plain PLL behavior, incorporates a digital phase detector
+and may have a digital divider in the loop. As a result, the frequency
+on the DPLL's input and output may be configurable.
+
+Subsystem
+=========
+
+The main purpose of the dpll subsystem is to provide a general
+interface for configuring devices that use any kind of Digital PLL and
+could use different sources of input signal to synchronize to, as well
+as different types of outputs.
+The main interface is a NETLINK_GENERIC based protocol with an event
+monitoring multicast group defined.
+
+Device object
+=============
+
+A single dpll device object represents a single Digital PLL circuit
+and a set of connected pins.
+It reports the supported modes of operation and the current status to
+the user in response to a `do` request of the netlink command
+``DPLL_CMD_DEVICE_GET``, and the list of dplls registered in the
+subsystem in response to a `dump` netlink request of the same command.
+Changing the configuration of a dpll device is done with a `do`
+request of the netlink ``DPLL_CMD_DEVICE_SET`` command.
+The device handle is ``DPLL_A_ID``; it shall be provided to get or set
+the configuration of a particular device in the system. It can be
+obtained with a ``DPLL_CMD_DEVICE_GET`` `dump` request or
+a ``DPLL_CMD_DEVICE_ID_GET`` `do` request, where the user must provide
+attributes that result in a single device match.
+
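+A minimal usage sketch (hedged: the in-tree YNL Python library under
+``tools/net/ynl`` and the ``Documentation/netlink/specs/dpll.yaml``
+spec are assumed; operation and attribute names follow that spec)::
+
+    # Run from tools/net/ynl/ so that "lib" is importable.
+    from lib import YnlFamily
+
+    ynl = YnlFamily("Documentation/netlink/specs/dpll.yaml")
+
+    # Dump all dpll devices registered in the subsystem.
+    devices = ynl.dump("device-get", {})
+
+    # Get a single device by its DPLL_A_ID handle.
+    dev = ynl.do("device-get", {"id": devices[0]["id"]})
+    print(dev["mode"], dev["lock-status"])
+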
+Pin object
+==========
+
+A pin is an amorphic object which represents either an input or an
+output; it could be an internal component of the device, as well as
+externally connected.
+The number of pins per dpll varies, but usually multiple pins shall be
+provided for a single dpll device.
+A pin's properties, capabilities and status are provided to the user
+in response to a `do` request of the netlink ``DPLL_CMD_PIN_GET``
+command.
+It is also possible to list all the pins that were registered in the
+system with a `dump` request of the ``DPLL_CMD_PIN_GET`` command.
+The configuration of a pin can be changed by a `do` request of the
+netlink ``DPLL_CMD_PIN_SET`` command.
+The pin handle is ``DPLL_A_PIN_ID``; it shall be provided to get or
+set the configuration of a particular pin in the system. It can be
+obtained with a ``DPLL_CMD_PIN_GET`` `dump` request or a
+``DPLL_CMD_PIN_ID_GET`` `do` request, where the user provides
+attributes that result in a single pin match.
+
+Pin selection
+=============
+
+In general, the selected pin (the one whose signal drives the dpll
+device) can be obtained from the ``DPLL_A_PIN_STATE`` attribute, and
+only one pin shall be in the ``DPLL_PIN_STATE_CONNECTED`` state for
+any dpll device.
+
+Pin selection can be done either manually or automatically, depending
+on hardware capabilities and the active dpll device work mode
+(``DPLL_A_MODE`` attribute). The consequence is that for each mode
+there are differences in the available pin states, as well as in the
+states the user can request for a dpll device.
+
+In manual mode (``DPLL_MODE_MANUAL``) the user can request or receive
+one of the following pin states:
+
+- ``DPLL_PIN_STATE_CONNECTED`` - the pin is used to drive the dpll
+  device
+- ``DPLL_PIN_STATE_DISCONNECTED`` - the pin is not used to drive the
+  dpll device
+
+In automatic mode (``DPLL_MODE_AUTOMATIC``) the user can request or
+receive one of the following pin states:
+
+- ``DPLL_PIN_STATE_SELECTABLE`` - the pin shall be considered a valid
+  input for the automatic selection algorithm
+- ``DPLL_PIN_STATE_DISCONNECTED`` - the pin shall not be considered a
+  valid input for the automatic selection algorithm
+
+In automatic mode (``DPLL_MODE_AUTOMATIC``) the user can only receive
+the pin state ``DPLL_PIN_STATE_CONNECTED``, once the automatic
+selection algorithm locks a dpll device with one of the inputs.
+
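+For illustration, a sketch of finding the currently connected input
+with the YNL Python library (same assumptions as the sketch above)::
+
+    # Report which pin currently drives which dpll device.
+    for pin in ynl.dump("pin-get", {}):
+        for parent in pin.get("parent-device", []):
+            if parent.get("state") == "connected":
+                print(f"pin {pin['id']} drives dpll {parent['parent-id']}")
+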
+Shared pins
+===========
+
+A single pin object can be attached to multiple dpll devices.
+In that case there are two groups of configuration knobs:
+
+1) Set on a pin - the configuration affects all dpll devices the pin
+   is registered with (e.g., ``DPLL_A_PIN_FREQUENCY``),
+2) Set on a pin-dpll tuple - the configuration affects only the
+   selected dpll device (e.g., ``DPLL_A_PIN_PRIO``,
+   ``DPLL_A_PIN_STATE``, ``DPLL_A_PIN_DIRECTION``).
+
+MUX-type pins
+=============
+
+A pin can be MUX-type; it aggregates child pins and serves as a pin
+multiplexer. One or more pins are registered with a MUX-type pin
+instead of being directly registered to a dpll device.
+Pins registered with a MUX-type pin provide the user with an
+additional nested attribute ``DPLL_A_PIN_PARENT_PIN`` for each parent
+they were registered with.
+If a pin was registered with multiple parent pins, they behave like a
+multiple-output multiplexer. In this case the output of a
+``DPLL_CMD_PIN_GET`` would contain multiple pin-parent nested
+attributes with the current state related to each parent, like::
+
+ 'pin': [{{
+ 'clock-id': 282574471561216,
+ 'module-name': 'ice',
+ 'capabilities': 4,
+ 'id': 13,
+ 'parent-pin': [
+ {'parent-id': 2, 'state': 'connected'},
+ {'parent-id': 3, 'state': 'disconnected'}
+ ],
+ 'type': 'synce-eth-port'
+ }}]
+
+Only one child pin can provide its signal to the parent MUX-type pin
+at a time; the selection is done by requesting a change of the child
+pin state on the desired parent, with the use of the
+``DPLL_A_PIN_PARENT_PIN`` nested attribute. Example of a netlink `set
+state on parent pin` message format:
+
+ ========================== =============================================
+ ``DPLL_A_PIN_ID`` child pin id
+ ``DPLL_A_PIN_PARENT_PIN`` nested attribute for requesting configuration
+ related to parent pin
+ ``DPLL_A_PIN_PARENT_ID`` parent pin id
+ ``DPLL_A_PIN_STATE`` requested pin state on parent
+ ========================== =============================================
+
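+A hedged sketch of the same request via the YNL Python library (the
+attribute naming is assumed from the dpll netlink spec)::
+
+    # Select child pin 13 as the active input of parent MUX-type pin 2,
+    # mirroring the message format above.
+    ynl.do("pin-set", {
+        "id": 13,                                # DPLL_A_PIN_ID
+        "parent-pin": [{"parent-id": 2,          # DPLL_A_PIN_PARENT_ID
+                        "state": "connected"}],  # DPLL_A_PIN_STATE
+    })
+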
+Pin priority
+============
+
+Some devices might offer the capability of an automatic pin selection
+mode (enum value ``DPLL_MODE_AUTOMATIC`` of the ``DPLL_A_MODE``
+attribute).
+Usually, automatic selection is performed on the hardware level, which
+means only pins directly connected to the dpll can be used for
+automatic input pin selection.
+In automatic selection mode, the user cannot manually select an input
+pin for the device; instead the user shall provide all directly
+connected pins with a priority ``DPLL_A_PIN_PRIO``, and the device
+would pick the highest priority valid signal and use it to control the
+DPLL device. Example of a netlink `set priority on parent device`
+message format:
+
+ ============================ =============================================
+ ``DPLL_A_PIN_ID`` configured pin id
+ ``DPLL_A_PIN_PARENT_DEVICE`` nested attribute for requesting configuration
+ related to parent dpll device
+ ``DPLL_A_PIN_PARENT_ID`` parent dpll device id
+ ``DPLL_A_PIN_PRIO`` requested pin prio on parent dpll
+ ============================ =============================================
+
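+The same request expressed as a YNL Python sketch (the numeric
+priority semantics are device specific; the values below are only
+assumptions)::
+
+    # Set the priority of pin 1 on parent dpll device 0.
+    ynl.do("pin-set", {
+        "id": 1,                                 # DPLL_A_PIN_ID
+        "parent-device": [{"parent-id": 0,       # DPLL_A_PIN_PARENT_ID
+                           "prio": 2}],          # DPLL_A_PIN_PRIO
+    })
+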
+A child pin of a MUX-type pin is not capable of automatic input pin
+selection; in order to configure the active input of a MUX-type pin,
+the user needs to request the desired pin state of the child pin on
+the parent pin, as described in the ``MUX-type pins`` chapter.
+
+Configuration commands group
+============================
+
+Configuration commands are used to get information about registered
+dpll devices (and pins), as well as to set the configuration of
+devices or pins.
+As dpll devices must be abstracted and reflect real hardware, there is
+no way to add a new dpll device via netlink from user space; each
+device should be registered by its driver.
+
+All netlink commands require ``GENL_ADMIN_PERM``. This is to prevent
+any spamming/DoS from unauthorized userspace applications.
+
+List of netlink commands with possible attributes
+=================================================
+
+Constants identifying command types for dpll devices use a
+``DPLL_CMD_`` prefix and a suffix according to the command purpose.
+The dpll device related attributes use a ``DPLL_A_`` prefix and a
+suffix according to the attribute purpose.
+
+ ==================================== =================================
+ ``DPLL_CMD_DEVICE_ID_GET`` command to get device ID
+ ``DPLL_A_MODULE_NAME`` attr module name of registerer
+ ``DPLL_A_CLOCK_ID`` attr Unique Clock Identifier
+ (EUI-64), as defined by the
+ IEEE 1588 standard
+ ``DPLL_A_TYPE`` attr type of dpll device
+ ==================================== =================================
+
+ ==================================== =================================
+ ``DPLL_CMD_DEVICE_GET`` command to get device info or
+ dump list of available devices
+ ``DPLL_A_ID`` attr unique dpll device ID
+ ``DPLL_A_MODULE_NAME`` attr module name of registerer
+ ``DPLL_A_CLOCK_ID`` attr Unique Clock Identifier
+ (EUI-64), as defined by the
+ IEEE 1588 standard
+ ``DPLL_A_MODE`` attr selection mode
+ ``DPLL_A_MODE_SUPPORTED`` attr available selection modes
+ ``DPLL_A_LOCK_STATUS`` attr dpll device lock status
+ ``DPLL_A_TEMP`` attr device temperature info
+ ``DPLL_A_TYPE`` attr type of dpll device
+ ==================================== =================================
+
+ ==================================== =================================
+ ``DPLL_CMD_DEVICE_SET`` command to set dpll device config
+ ``DPLL_A_ID`` attr unique dpll device ID
+ ``DPLL_A_MODE`` attr selection mode to configure
+ ==================================== =================================
+
+Constants identifying command types for pins use a
+``DPLL_CMD_PIN_`` prefix and a suffix according to the command
+purpose.
+The pin related attributes use a ``DPLL_A_PIN_`` prefix and a suffix
+according to the attribute purpose.
+
+ ==================================== =================================
+ ``DPLL_CMD_PIN_ID_GET`` command to get pin ID
+ ``DPLL_A_PIN_MODULE_NAME`` attr module name of registerer
+ ``DPLL_A_PIN_CLOCK_ID`` attr Unique Clock Identifier
+ (EUI-64), as defined by the
+ IEEE 1588 standard
+ ``DPLL_A_PIN_BOARD_LABEL`` attr pin board label provided
+ by registerer
+ ``DPLL_A_PIN_PANEL_LABEL`` attr pin panel label provided
+ by registerer
+ ``DPLL_A_PIN_PACKAGE_LABEL`` attr pin package label provided
+ by registerer
+ ``DPLL_A_PIN_TYPE`` attr type of a pin
+ ==================================== =================================
+
+ ==================================== ==================================
+ ``DPLL_CMD_PIN_GET`` command to get pin info or dump
+ list of available pins
+ ``DPLL_A_PIN_ID`` attr unique pin ID
+ ``DPLL_A_PIN_MODULE_NAME`` attr module name of registerer
+ ``DPLL_A_PIN_CLOCK_ID`` attr Unique Clock Identifier
+ (EUI-64), as defined by the
+ IEEE 1588 standard
+ ``DPLL_A_PIN_BOARD_LABEL`` attr pin board label provided
+ by registerer
+ ``DPLL_A_PIN_PANEL_LABEL`` attr pin panel label provided
+ by registerer
+ ``DPLL_A_PIN_PACKAGE_LABEL`` attr pin package label provided
+ by registerer
+ ``DPLL_A_PIN_TYPE`` attr type of a pin
+ ``DPLL_A_PIN_FREQUENCY`` attr current frequency of a pin
+ ``DPLL_A_PIN_FREQUENCY_SUPPORTED`` nested attr provides supported
+ frequencies
+ ``DPLL_A_PIN_ANY_FREQUENCY_MIN`` attr minimum value of frequency
+ ``DPLL_A_PIN_ANY_FREQUENCY_MAX`` attr maximum value of frequency
+ ``DPLL_A_PIN_PARENT_DEVICE`` nested attr for each parent device
+ the pin is connected with
+ ``DPLL_A_PIN_PARENT_ID`` attr parent dpll device id
+ ``DPLL_A_PIN_PRIO`` attr priority of pin on the
+ dpll device
+ ``DPLL_A_PIN_STATE`` attr state of pin on the parent
+ dpll device
+ ``DPLL_A_PIN_DIRECTION`` attr direction of a pin on the
+ parent dpll device
+ ``DPLL_A_PIN_PARENT_PIN`` nested attr for each parent pin
+ the pin is connected with
+ ``DPLL_A_PIN_PARENT_ID`` attr parent pin id
+ ``DPLL_A_PIN_STATE`` attr state of pin on the parent
+ pin
+ ``DPLL_A_PIN_CAPABILITIES`` attr bitmask of pin capabilities
+ ==================================== ==================================
+
+ ==================================== =================================
+ ``DPLL_CMD_PIN_SET`` command to set pins configuration
+  ``DPLL_A_PIN_ID``                    attr unique pin ID
+ ``DPLL_A_PIN_FREQUENCY`` attr requested frequency of a pin
+ ``DPLL_A_PIN_PARENT_DEVICE`` nested attr for each parent dpll
+ device configuration request
+ ``DPLL_A_PIN_PARENT_ID`` attr parent dpll device id
+ ``DPLL_A_PIN_DIRECTION`` attr requested direction of a pin
+ ``DPLL_A_PIN_PRIO`` attr requested priority of pin on
+ the dpll device
+ ``DPLL_A_PIN_STATE`` attr requested state of pin on
+ the dpll device
+ ``DPLL_A_PIN_PARENT_PIN`` nested attr for each parent pin
+ configuration request
+ ``DPLL_A_PIN_PARENT_ID`` attr parent pin id
+ ``DPLL_A_PIN_STATE`` attr requested state of pin on
+ parent pin
+ ==================================== =================================
+
+Netlink dump requests
+=====================
+
+The ``DPLL_CMD_DEVICE_GET`` and ``DPLL_CMD_PIN_GET`` commands support
+netlink requests of the dump type, in which case the response has the
+same format as for their ``do`` request, but every device or pin
+registered in the system is returned.
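+
+For example, using the YNL CLI from the kernel tree, all registered
+dpll devices could be dumped as follows (a sketch; the command needs
+admin privileges)::
+
+  # ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
+        --dump device-get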
+
+SET commands format
+===================
+
+``DPLL_CMD_DEVICE_SET`` - to target a dpll device, the user provides
+``DPLL_A_ID``, which is the unique identifier of a dpll device in the
+system, as well as the parameter being configured (``DPLL_A_MODE``).
+
+``DPLL_CMD_PIN_SET`` - to target a pin, the user must provide a
+``DPLL_A_PIN_ID``, which is the unique identifier of a pin in the
+system, along with the pin parameters to be configured.
+If ``DPLL_A_PIN_FREQUENCY`` is configured, this affects all the dpll
+devices that are connected with the pin, which is why the frequency
+attribute must not be enclosed in ``DPLL_A_PIN_PARENT_DEVICE``.
+The other attributes (``DPLL_A_PIN_PRIO``, ``DPLL_A_PIN_STATE`` or
+``DPLL_A_PIN_DIRECTION``) must be enclosed in
+``DPLL_A_PIN_PARENT_DEVICE``, as their configuration relates to only
+one of the parent dplls, targeted by the ``DPLL_A_PIN_PARENT_ID``
+attribute, which is also required inside that nest.
+For MUX-type pins, the ``DPLL_A_PIN_STATE`` attribute is configured in
+a similar way, by enclosing the required state in the
+``DPLL_A_PIN_PARENT_PIN`` nested attribute and the targeted parent pin
+id in ``DPLL_A_PIN_PARENT_ID``.
+
+In general, it is possible to configure multiple parameters at once,
+but internally each parameter change will be invoked separately, and
+the order of configuration is not guaranteed by any means.
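+
+To make the nesting rules concrete, below is a rough userspace sketch
+that builds a ``DPLL_CMD_PIN_SET`` request with libnl-genl and sets the
+priority of a pin on one of its parent dpll devices. The helper name is
+invented, the genl family id is assumed to be already resolved, and
+error handling plus message cleanup are omitted:
+
+.. code-block:: c
+
+    #include <stdint.h>
+    #include <netlink/genl/genl.h>
+    #include <linux/dpll.h>
+
+    static int foo_pin_set_prio(struct nl_sock *sk, int family,
+                                uint32_t pin_id, uint32_t dpll_id,
+                                uint32_t prio)
+    {
+        struct nl_msg *msg = nlmsg_alloc();
+        struct nlattr *nest;
+
+        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
+                    DPLL_CMD_PIN_SET, 1);
+        /* the pin itself is targeted directly ... */
+        nla_put_u32(msg, DPLL_A_PIN_ID, pin_id);
+        /* ... while prio relates to a single parent device, hence the
+         * DPLL_A_PIN_PARENT_DEVICE nest carrying DPLL_A_PIN_PARENT_ID
+         */
+        nest = nla_nest_start(msg, DPLL_A_PIN_PARENT_DEVICE);
+        nla_put_u32(msg, DPLL_A_PIN_PARENT_ID, dpll_id);
+        nla_put_u32(msg, DPLL_A_PIN_PRIO, prio);
+        nla_nest_end(msg, nest);
+
+        return nl_send_auto(sk, msg);
+    }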
+
+Configuration pre-defined enums
+===============================
+
+.. kernel-doc:: include/uapi/linux/dpll.h
+
+Notifications
+=============
+
+A dpll device can provide notifications regarding status changes of the
+device, e.g. lock status changes, input/output changes or other alarms.
+There is one multicast group that is used to notify user-space apps via
+the netlink socket: ``DPLL_MCGRP_MONITOR``.
+
+Notification messages:
+
+ ============================== =====================================
+ ``DPLL_CMD_DEVICE_CREATE_NTF`` dpll device was created
+ ``DPLL_CMD_DEVICE_DELETE_NTF`` dpll device was deleted
+ ``DPLL_CMD_DEVICE_CHANGE_NTF`` dpll device has changed
+ ``DPLL_CMD_PIN_CREATE_NTF`` dpll pin was created
+ ``DPLL_CMD_PIN_DELETE_NTF`` dpll pin was deleted
+ ``DPLL_CMD_PIN_CHANGE_NTF`` dpll pin has changed
+ ============================== =====================================
+
+The event format is the same as for the corresponding get command:
+``DPLL_CMD_DEVICE_`` events use the same format as the response of
+``DPLL_CMD_DEVICE_GET``, and ``DPLL_CMD_PIN_`` events use the same
+format as the response of ``DPLL_CMD_PIN_GET``.
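+
+For example, the notifications can be observed with the YNL CLI from
+the kernel tree (a sketch; the exact flags may differ between kernel
+versions)::
+
+  # ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
+        --subscribe monitor --sleep 10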
+
+Device driver implementation
+============================
+
+A device is allocated by a dpll_device_get() call. A second call with
+the same arguments will not create a new object, but instead returns a
+pointer to the previously created device and increases its refcount.
+A device is deallocated by a dpll_device_put() call, which decreases
+the refcount; once the refcount drops to zero, the object is destroyed.
+
+A driver should implement a set of operations and register the device
+via dpll_device_register(), at which point it becomes available to
+users. Multiple driver instances can obtain a reference to it with
+dpll_device_get(), as well as register the dpll device with their own
+ops and priv.
+
+Pins are allocated separately with dpll_pin_get(), which works
+similarly to dpll_device_get(): the first call creates the object, and
+each subsequent call with the same arguments only increases the object
+refcount. Likewise, dpll_pin_put() works similarly to
+dpll_device_put().
+
+A pin can be registered with a parent dpll device or a parent pin,
+depending on hardware needs. Each registration requires the registerer
+to provide a set of pin callbacks, and a private data pointer used when
+calling them (see the sketch following the list):
+
+- dpll_pin_register() - register pin with a dpll device,
+- dpll_pin_on_pin_register() - register pin with another MUX type pin.
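+
+For example, a driver exposing inputs selectable through a MUX could
+first register the MUX pin with the dpll device and then register each
+input with the MUX pin. A rough sketch, with hypothetical ``foo_``
+names and an invented unwind label:
+
+.. code-block:: c
+
+    /* register the MUX itself with the dpll device */
+    err = dpll_pin_register(foo->dpll, foo->mux_pin, &foo_pin_ops, foo);
+    if (err)
+        return err;
+
+    /* register every input selectable by the MUX with the MUX pin */
+    for (i = 0; i < FOO_MUX_INPUTS; i++) {
+        err = dpll_pin_on_pin_register(foo->mux_pin, foo->input[i],
+                                       &foo_pin_ops, foo);
+        if (err)
+            goto unwind_inputs;
+    }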
+
+Notifications about adding or removing dpll devices are created within
+the subsystem itself.
+Notifications about registering/deregistering pins are also invoked by
+the subsystem.
+Notifications about status changes, either of a dpll device or of a
+pin, are invoked in two ways:
+
+- after a successful change was requested on the dpll subsystem, the
+  subsystem calls the corresponding notification,
+- requested by the device driver with dpll_device_change_ntf() or
+  dpll_pin_change_ntf(), when the driver informs about a status change.
+
+A device driver using the dpll interface is not required to implement
+all the callback operations. Nevertheless, a few of them must be
+implemented.
+Required dpll device level callback operations:
+
+- ``.mode_get``,
+- ``.lock_status_get``.
+
+Required pin level callback operations:
+
+- ``.state_on_dpll_get`` (pins registered with dpll device),
+- ``.state_on_pin_get`` (pins registered with parent pin),
+- ``.direction_get``.
+
+Every other operation handler is checked for existence, and
+``-EOPNOTSUPP`` is returned if the specific handler is absent.
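+
+As an illustration, a minimal pair of the required device-level
+handlers could look like the sketch below. The ``foo_`` names and
+foo_hw_is_locked() are invented for the example; the prototypes follow
+include/linux/dpll.h at the time of writing:
+
+.. code-block:: c
+
+    static int foo_dpll_lock_status_get(const struct dpll_device *dpll,
+                                        void *priv,
+                                        enum dpll_lock_status *status,
+                                        struct netlink_ext_ack *extack)
+    {
+        struct foo *foo = priv;
+
+        /* foo_hw_is_locked() stands in for a device register read */
+        *status = foo_hw_is_locked(foo) ? DPLL_LOCK_STATUS_LOCKED :
+                                          DPLL_LOCK_STATUS_UNLOCKED;
+        return 0;
+    }
+
+    static int foo_dpll_mode_get(const struct dpll_device *dpll,
+                                 void *priv, enum dpll_mode *mode,
+                                 struct netlink_ext_ack *extack)
+    {
+        /* this sample device only does automatic input selection */
+        *mode = DPLL_MODE_AUTOMATIC;
+        return 0;
+    }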
+
+The simplest implementation is in the OCP TimeCard driver. The ops
+structures are defined like this:
+
+.. code-block:: c
+
+ static const struct dpll_device_ops dpll_ops = {
+ .lock_status_get = ptp_ocp_dpll_lock_status_get,
+ .mode_get = ptp_ocp_dpll_mode_get,
+ .mode_supported = ptp_ocp_dpll_mode_supported,
+ };
+
+ static const struct dpll_pin_ops dpll_pins_ops = {
+ .frequency_get = ptp_ocp_dpll_frequency_get,
+ .frequency_set = ptp_ocp_dpll_frequency_set,
+ .direction_get = ptp_ocp_dpll_direction_get,
+ .direction_set = ptp_ocp_dpll_direction_set,
+ .state_on_dpll_get = ptp_ocp_dpll_state_get,
+ };
+
+The registration part then looks like this:
+
+.. code-block:: c
+
+ clkid = pci_get_dsn(pdev);
+ bp->dpll = dpll_device_get(clkid, 0, THIS_MODULE);
+ if (IS_ERR(bp->dpll)) {
+ err = PTR_ERR(bp->dpll);
+		dev_err(&pdev->dev, "dpll_device_get failed\n");
+ goto out;
+ }
+
+ err = dpll_device_register(bp->dpll, DPLL_TYPE_PPS, &dpll_ops, bp);
+ if (err)
+ goto out;
+
+ for (i = 0; i < OCP_SMA_NUM; i++) {
+ bp->sma[i].dpll_pin = dpll_pin_get(clkid, i, THIS_MODULE, &bp->sma[i].dpll_prop);
+ if (IS_ERR(bp->sma[i].dpll_pin)) {
+			err = PTR_ERR(bp->sma[i].dpll_pin);
+ goto out_dpll;
+ }
+
+ err = dpll_pin_register(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops,
+ &bp->sma[i]);
+ if (err) {
+ dpll_pin_put(bp->sma[i].dpll_pin);
+ goto out_dpll;
+ }
+ }
+
+In the error path we have to rewind every allocation in reverse order:
+
+.. code-block:: c
+
+ while (i) {
+ --i;
+ dpll_pin_unregister(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops, &bp->sma[i]);
+ dpll_pin_put(bp->sma[i].dpll_pin);
+ }
+ dpll_device_put(bp->dpll);
+
+More complex examples can be found in Intel's ice driver or NVIDIA's
+mlx5 driver.
+
+SyncE enablement
+================
+
+For SyncE enablement, a software application which monitors and
+configures the inputs of a dpll device in response to the current state
+of the dpll device and its inputs must be allowed to control that dpll
+device.
+In such a scenario, the dpll device input signal shall also be
+configurable to drive the dpll with a signal recovered from the PHY
+netdevice.
+This is done by exposing a pin to the netdevice - attaching the pin to
+the netdevice itself with
+``netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)``.
+The exposed pin id handle ``DPLL_A_PIN_ID`` is then identifiable by the
+user, as it is attached to the rtnetlink response for the ``RTM_NEWLINK``
+command in the nested attribute ``IFLA_DPLL_PIN``.
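+
+A rough sketch of how a driver with a SyncE-capable port might do this,
+using hypothetical ``foo_`` names (the recovered-clock pin is assumed
+to be already allocated):
+
+.. code-block:: c
+
+    err = dpll_pin_register(foo->dpll, foo->rclk_pin, &foo_pin_ops, foo);
+    if (err)
+        return err;
+    /* let userspace find the pin via RTM_NEWLINK / IFLA_DPLL_PIN */
+    netdev_dpll_pin_set(foo->netdev, foo->rclk_pin);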
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 1e16a40da3ba..f549a68951d7 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -114,6 +114,7 @@ available subsections can be seen below.
zorro
hte/index
wmi
+ dpll
.. only:: subproject and html
diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml
new file mode 100644
index 000000000000..8b86b28b47a6
--- /dev/null
+++ b/Documentation/netlink/specs/dpll.yaml
@@ -0,0 +1,488 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: dpll
+
+doc: DPLL subsystem.
+
+definitions:
+ -
+ type: enum
+ name: mode
+ doc: |
+ working modes a dpll can support, differentiates if and how dpll selects
+ one of its inputs to syntonize with it, valid values for DPLL_A_MODE
+ attribute
+ entries:
+ -
+ name: manual
+ doc: input can be only selected by sending a request to dpll
+ value: 1
+ -
+ name: automatic
+ doc: highest prio input pin auto selected by dpll
+ render-max: true
+ -
+ type: enum
+ name: lock-status
+ doc: |
+ provides information of dpll device lock status, valid values for
+ DPLL_A_LOCK_STATUS attribute
+ entries:
+ -
+ name: unlocked
+ doc: |
+ dpll was not yet locked to any valid input (or forced by setting
+ DPLL_A_MODE to DPLL_MODE_DETACHED)
+ value: 1
+ -
+ name: locked
+ doc: |
+ dpll is locked to a valid signal, but no holdover available
+ -
+ name: locked-ho-acq
+ doc: |
+ dpll is locked and holdover acquired
+ -
+ name: holdover
+ doc: |
+ dpll is in holdover state - lost a valid lock or was forced
+ by disconnecting all the pins (latter possible only
+ when dpll lock-state was already DPLL_LOCK_STATUS_LOCKED_HO_ACQ,
+ if dpll lock-state was not DPLL_LOCK_STATUS_LOCKED_HO_ACQ, the
+ dpll's lock-state shall remain DPLL_LOCK_STATUS_UNLOCKED)
+ render-max: true
+ -
+ type: const
+ name: temp-divider
+ value: 1000
+ doc: |
+ temperature divider allowing userspace to calculate the
+ temperature as float with three digit decimal precision.
+ Value of (DPLL_A_TEMP / DPLL_TEMP_DIVIDER) is integer part of
+ temperature value.
+ Value of (DPLL_A_TEMP % DPLL_TEMP_DIVIDER) is fractional part of
+ temperature value.
+ -
+ type: enum
+ name: type
+ doc: type of dpll, valid values for DPLL_A_TYPE attribute
+ entries:
+ -
+ name: pps
+ doc: dpll produces Pulse-Per-Second signal
+ value: 1
+ -
+ name: eec
+ doc: dpll drives the Ethernet Equipment Clock
+ render-max: true
+ -
+ type: enum
+ name: pin-type
+ doc: |
+ defines possible types of a pin, valid values for DPLL_A_PIN_TYPE
+ attribute
+ entries:
+ -
+ name: mux
+ doc: aggregates another layer of selectable pins
+ value: 1
+ -
+ name: ext
+ doc: external input
+ -
+ name: synce-eth-port
+ doc: ethernet port PHY's recovered clock
+ -
+ name: int-oscillator
+ doc: device internal oscillator
+ -
+ name: gnss
+ doc: GNSS recovered clock
+ render-max: true
+ -
+ type: enum
+ name: pin-direction
+ doc: |
+ defines possible direction of a pin, valid values for
+ DPLL_A_PIN_DIRECTION attribute
+ entries:
+ -
+ name: input
+        doc: pin used as an input of a signal
+ value: 1
+ -
+ name: output
+ doc: pin used to output the signal
+ render-max: true
+ -
+ type: const
+ name: pin-frequency-1-hz
+ value: 1
+ -
+ type: const
+ name: pin-frequency-10-khz
+ value: 10000
+ -
+ type: const
+ name: pin-frequency-77_5-khz
+ value: 77500
+ -
+ type: const
+ name: pin-frequency-10-mhz
+ value: 10000000
+ -
+ type: enum
+ name: pin-state
+ doc: |
+ defines possible states of a pin, valid values for
+ DPLL_A_PIN_STATE attribute
+ entries:
+ -
+ name: connected
+ doc: pin connected, active input of phase locked loop
+ value: 1
+ -
+ name: disconnected
+ doc: pin disconnected, not considered as a valid input
+ -
+ name: selectable
+ doc: pin enabled for automatic input selection
+ render-max: true
+ -
+ type: flags
+ name: pin-capabilities
+ doc: |
+ defines possible capabilities of a pin, valid flags on
+ DPLL_A_PIN_CAPABILITIES attribute
+ entries:
+ -
+ name: direction-can-change
+ doc: pin direction can be changed
+ -
+ name: priority-can-change
+ doc: pin priority can be changed
+ -
+ name: state-can-change
+ doc: pin state can be changed
+
+attribute-sets:
+ -
+ name: dpll
+ enum-name: dpll_a
+ attributes:
+ -
+ name: id
+ type: u32
+ -
+ name: module-name
+ type: string
+ -
+ name: pad
+ type: pad
+ -
+ name: clock-id
+ type: u64
+ -
+ name: mode
+ type: u32
+ enum: mode
+ -
+ name: mode-supported
+ type: u32
+ enum: mode
+ multi-attr: true
+ -
+ name: lock-status
+ type: u32
+ enum: lock-status
+ -
+ name: temp
+ type: s32
+ -
+ name: type
+ type: u32
+ enum: type
+ -
+ name: pin
+ enum-name: dpll_a_pin
+ attributes:
+ -
+ name: id
+ type: u32
+ -
+ name: parent-id
+ type: u32
+ -
+ name: module-name
+ type: string
+ -
+ name: pad
+ type: pad
+ -
+ name: clock-id
+ type: u64
+ -
+ name: board-label
+ type: string
+ -
+ name: panel-label
+ type: string
+ -
+ name: package-label
+ type: string
+ -
+ name: type
+ type: u32
+ enum: pin-type
+ -
+ name: direction
+ type: u32
+ enum: pin-direction
+ -
+ name: frequency
+ type: u64
+ -
+ name: frequency-supported
+ type: nest
+ multi-attr: true
+ nested-attributes: frequency-range
+ -
+ name: frequency-min
+ type: u64
+ -
+ name: frequency-max
+ type: u64
+ -
+ name: prio
+ type: u32
+ -
+ name: state
+ type: u32
+ enum: pin-state
+ -
+ name: capabilities
+ type: u32
+ -
+ name: parent-device
+ type: nest
+ multi-attr: true
+ nested-attributes: pin-parent-device
+ -
+ name: parent-pin
+ type: nest
+ multi-attr: true
+ nested-attributes: pin-parent-pin
+ -
+ name: pin-parent-device
+ subset-of: pin
+ attributes:
+ -
+ name: parent-id
+ type: u32
+ -
+ name: direction
+ type: u32
+ -
+ name: prio
+ type: u32
+ -
+ name: state
+ type: u32
+ -
+ name: pin-parent-pin
+ subset-of: pin
+ attributes:
+ -
+ name: parent-id
+ type: u32
+ -
+ name: state
+ type: u32
+ -
+ name: frequency-range
+ subset-of: pin
+ attributes:
+ -
+ name: frequency-min
+ type: u64
+ -
+ name: frequency-max
+ type: u64
+
+operations:
+ enum-name: dpll_cmd
+ list:
+ -
+ name: device-id-get
+ doc: |
+ Get id of dpll device that matches given attributes
+ attribute-set: dpll
+ flags: [ admin-perm ]
+
+ do:
+ pre: dpll-lock-doit
+ post: dpll-unlock-doit
+ request:
+ attributes:
+ - module-name
+ - clock-id
+ - type
+ reply:
+ attributes:
+ - id
+
+ -
+ name: device-get
+ doc: |
+ Get list of DPLL devices (dump) or attributes of a single dpll device
+ attribute-set: dpll
+ flags: [ admin-perm ]
+
+ do:
+ pre: dpll-pre-doit
+ post: dpll-post-doit
+ request:
+ attributes:
+ - id
+ reply: &dev-attrs
+ attributes:
+ - id
+ - module-name
+ - mode
+ - mode-supported
+ - lock-status
+ - temp
+ - clock-id
+ - type
+
+ dump:
+ pre: dpll-lock-dumpit
+ post: dpll-unlock-dumpit
+ reply: *dev-attrs
+
+ -
+ name: device-set
+ doc: Set attributes for a DPLL device
+ attribute-set: dpll
+ flags: [ admin-perm ]
+
+ do:
+ pre: dpll-pre-doit
+ post: dpll-post-doit
+ request:
+ attributes:
+ - id
+ -
+ name: device-create-ntf
+ doc: Notification about device appearing
+ notify: device-get
+ mcgrp: monitor
+ -
+ name: device-delete-ntf
+ doc: Notification about device disappearing
+ notify: device-get
+ mcgrp: monitor
+ -
+ name: device-change-ntf
+ doc: Notification about device configuration being changed
+ notify: device-get
+ mcgrp: monitor
+ -
+ name: pin-id-get
+ doc: |
+ Get id of a pin that matches given attributes
+ attribute-set: pin
+ flags: [ admin-perm ]
+
+ do:
+ pre: dpll-lock-doit
+ post: dpll-unlock-doit
+ request:
+ attributes:
+ - module-name
+ - clock-id
+ - board-label
+ - panel-label
+ - package-label
+ - type
+ reply:
+ attributes:
+ - id
+
+ -
+ name: pin-get
+ doc: |
+        Get list of pins and their attributes.
+ - dump request without any attributes given - list all the pins in the
+ system
+ - dump request with target dpll - list all the pins registered with
+ a given dpll device
+ - do request with target dpll and target pin - single pin attributes
+ attribute-set: pin
+ flags: [ admin-perm ]
+
+ do:
+ pre: dpll-pin-pre-doit
+ post: dpll-pin-post-doit
+ request:
+ attributes:
+ - id
+ reply: &pin-attrs
+ attributes:
+ - id
+ - board-label
+ - panel-label
+ - package-label
+ - type
+ - frequency
+ - frequency-supported
+ - capabilities
+ - parent-device
+ - parent-pin
+
+ dump:
+ pre: dpll-lock-dumpit
+ post: dpll-unlock-dumpit
+ request:
+ attributes:
+ - id
+ reply: *pin-attrs
+
+ -
+ name: pin-set
+ doc: Set attributes of a target pin
+ attribute-set: pin
+ flags: [ admin-perm ]
+
+ do:
+ pre: dpll-pin-pre-doit
+ post: dpll-pin-post-doit
+ request:
+ attributes:
+ - id
+ - frequency
+ - direction
+ - prio
+ - state
+ - parent-device
+ - parent-pin
+ -
+ name: pin-create-ntf
+ doc: Notification about pin appearing
+ notify: pin-get
+ mcgrp: monitor
+ -
+ name: pin-delete-ntf
+ doc: Notification about pin disappearing
+ notify: pin-get
+ mcgrp: monitor
+ -
+ name: pin-change-ntf
+ doc: Notification about pin configuration being changed
+ notify: pin-get
+ mcgrp: monitor
+
+mcast-groups:
+ list:
+ -
+ name: monitor
diff --git a/Documentation/netlink/specs/handshake.yaml b/Documentation/netlink/specs/handshake.yaml
index 6d89e30f5fd5..b934cc513e3d 100644
--- a/Documentation/netlink/specs/handshake.yaml
+++ b/Documentation/netlink/specs/handshake.yaml
@@ -34,16 +34,16 @@ attribute-sets:
attributes:
-
name: cert
- type: u32
+ type: s32
-
name: privkey
- type: u32
+ type: s32
-
name: accept
attributes:
-
name: sockfd
- type: u32
+ type: s32
-
name: handler-class
type: u32
@@ -79,7 +79,7 @@ attribute-sets:
type: u32
-
name: sockfd
- type: u32
+ type: s32
-
name: remote-auth
type: u32
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 1c7284fd535b..14511b13f305 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -42,6 +42,19 @@ definitions:
doc:
This feature informs if netdev implements non-linear XDP buffer
support in ndo_xdp_xmit callback.
+ -
+ type: flags
+ name: xdp-rx-metadata
+ render-max: true
+ entries:
+ -
+ name: timestamp
+ doc:
+ Device is capable of exposing receive HW timestamp via bpf_xdp_metadata_rx_timestamp().
+ -
+ name: hash
+ doc:
+ Device is capable of exposing receive packet hash via bpf_xdp_metadata_rx_hash().
attribute-sets:
-
@@ -61,13 +74,18 @@ attribute-sets:
doc: Bitmask of enabled xdp-features.
type: u64
enum: xdp-act
- enum-as-flags: true
-
name: xdp-zc-max-segs
doc: max fragment count supported by ZC driver
type: u32
checks:
min: 1
+ -
+ name: xdp-rx-metadata-features
+ doc: Bitmask of supported XDP receive metadata features.
+ See Documentation/networking/xdp-rx-metadata.rst for more details.
+ type: u64
+ enum: xdp-rx-metadata
operations:
list:
@@ -84,6 +102,7 @@ operations:
- ifindex
- xdp-features
- xdp-zc-max-segs
+ - xdp-rx-metadata-features
dump:
reply: *dev-all
-
diff --git a/Documentation/networking/device_drivers/appletalk/cops.rst b/Documentation/networking/device_drivers/appletalk/cops.rst
deleted file mode 100644
index 964ba80599a9..000000000000
--- a/Documentation/networking/device_drivers/appletalk/cops.rst
+++ /dev/null
@@ -1,80 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-========================================
-The COPS LocalTalk Linux driver (cops.c)
-========================================
-
-By Jay Schulist <jschlst@samba.org>
-
-This driver has two modes and they are: Dayna mode and Tangent mode.
-Each mode corresponds with the type of card. It has been found
-that there are 2 main types of cards and all other cards are
-the same and just have different names or only have minor differences
-such as more IO ports. As this driver is tested it will
-become more clear exactly what cards are supported.
-
-Right now these cards are known to work with the COPS driver. The
-LT-200 cards work in a somewhat more limited capacity than the
-DL200 cards, which work very well and are in use by many people.
-
-TANGENT driver mode:
- - Tangent ATB-II, Novell NL-1000, Daystar Digital LT-200
-
-DAYNA driver mode:
- - Dayna DL2000/DaynaTalk PC (Half Length), COPS LT-95,
- - Farallon PhoneNET PC III, Farallon PhoneNET PC II
-
-Other cards possibly supported mode unknown though:
- - Dayna DL2000 (Full length)
-
-The COPS driver defaults to using Dayna mode. To change the driver's
-mode if you built a driver with dual support use board_type=1 or
-board_type=2 for Dayna or Tangent with insmod.
-
-Operation/loading of the driver
-===============================
-
-Use modprobe like this: /sbin/modprobe cops.o (IO #) (IRQ #)
-If you do not specify any options the driver will try and use the IO = 0x240,
-IRQ = 5. As of right now I would only use IRQ 5 for the card, if autoprobing.
-
-To load multiple COPS driver Localtalk cards you can do one of the following::
-
- insmod cops io=0x240 irq=5
- insmod -o cops2 cops io=0x260 irq=3
-
-Or in lilo.conf put something like this::
-
- append="ether=5,0x240,lt0 ether=3,0x260,lt1"
-
-Then bring up the interface with ifconfig. It will look something like this::
-
- lt0 Link encap:UNSPEC HWaddr 00-00-00-00-00-00-00-F7-00-00-00-00-00-00-00-00
- inet addr:192.168.1.2 Bcast:192.168.1.255 Mask:255.255.255.0
- UP BROADCAST RUNNING NOARP MULTICAST MTU:600 Metric:1
- RX packets:0 errors:0 dropped:0 overruns:0 frame:0
- TX packets:0 errors:0 dropped:0 overruns:0 carrier:0 coll:0
-
-Netatalk Configuration
-======================
-
-You will need to configure atalkd with something like the following to make
-it work with the cops.c driver.
-
-* For single LTalk card use::
-
- dummy -seed -phase 2 -net 2000 -addr 2000.10 -zone "1033"
- lt0 -seed -phase 1 -net 1000 -addr 1000.50 -zone "1033"
-
-* For multiple cards, Ethernet and LocalTalk::
-
- eth0 -seed -phase 2 -net 3000 -addr 3000.20 -zone "1033"
- lt0 -seed -phase 1 -net 1000 -addr 1000.50 -zone "1033"
-
-* For multiple LocalTalk cards, and an Ethernet card.
-
-* Order seems to matter here, Ethernet last::
-
- lt0 -seed -phase 1 -net 1000 -addr 1000.10 -zone "LocalTalk1"
- lt1 -seed -phase 1 -net 2000 -addr 2000.20 -zone "LocalTalk2"
- eth0 -seed -phase 2 -net 3000 -addr 3000.30 -zone "EtherTalk"
diff --git a/Documentation/networking/device_drivers/appletalk/index.rst b/Documentation/networking/device_drivers/appletalk/index.rst
deleted file mode 100644
index c196baeb0856..000000000000
--- a/Documentation/networking/device_drivers/appletalk/index.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-
-AppleTalk Device Drivers
-========================
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
- cops
-
-.. only:: subproject and html
-
- Indices
- =======
-
- * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 9827e816084b..43de285b8a92 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -32,6 +32,7 @@ Contents:
intel/e1000
intel/e1000e
intel/fm10k
+ intel/idpf
intel/igb
intel/igbvf
intel/ixgbe
diff --git a/Documentation/networking/device_drivers/ethernet/intel/idpf.rst b/Documentation/networking/device_drivers/ethernet/intel/idpf.rst
new file mode 100644
index 000000000000..adb16e2abd21
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/intel/idpf.rst
@@ -0,0 +1,160 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+==========================================================================
+idpf Linux* Base Driver for the Intel(R) Infrastructure Data Path Function
+==========================================================================
+
+Intel idpf Linux driver.
+Copyright(C) 2023 Intel Corporation.
+
+.. contents::
+
+The idpf driver serves as both the Physical Function (PF) and Virtual Function
+(VF) driver for the Intel(R) Infrastructure Data Path Function.
+
+Driver information can be obtained using ethtool, lspci, and ip.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel adapter. All hardware requirements listed apply to use
+with Linux.
+
+
+Identifying Your Adapter
+========================
+For information on how to identify your adapter, and for the latest Intel
+network drivers, refer to the Intel Support website:
+http://www.intel.com/support
+
+
+Additional Features and Configurations
+======================================
+
+ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The latest ethtool
+version is required for this functionality. If you don't have one yet, you can
+obtain it at:
+https://kernel.org/pub/software/network/ethtool/
+
+
+Viewing Link Messages
+---------------------
+Link messages will not be displayed to the console if the distribution is
+restricting system messages. In order to see network driver link messages on
+your console, set the dmesg log level to eight by entering the following::
+
+ # dmesg -n 8
+
+.. note::
+ This setting is not saved across reboots.
+
+
+Jumbo Frames
+------------
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit (MTU)
+to a value larger than the default value of 1500.
+
+Use the ip command to increase the MTU size. For example, enter the following
+where <ethX> is the interface number::
+
+ # ip link set mtu 9000 dev <ethX>
+ # ip link set up dev <ethX>
+
+.. note::
+ The maximum MTU setting for jumbo frames is 9706. This corresponds to the
+ maximum jumbo frame size of 9728 bytes.
+
+.. note::
+ This driver will attempt to use multiple page sized buffers to receive
+ each jumbo packet. This should help to avoid buffer starvation issues when
+ allocating receive packets.
+
+.. note::
+ Packet loss may have a greater impact on throughput when you use jumbo
+ frames. If you observe a drop in performance after enabling jumbo frames,
+ enabling flow control may mitigate the issue.
+
+
+Performance Optimization
+========================
+Driver defaults are meant to fit a wide variety of workloads, but if further
+optimization is required, we recommend experimenting with the following
+settings.
+
+
+Interrupt Rate Limiting
+-----------------------
+This driver supports an adaptive interrupt throttle rate (ITR) mechanism that
+is tuned for general workloads. The user can customize the interrupt rate
+control for specific workloads, via ethtool, adjusting the number of
+microseconds between interrupts.
+
+To set the interrupt rate manually, you must disable adaptive mode::
+
+ # ethtool -C <ethX> adaptive-rx off adaptive-tx off
+
+For lower CPU utilization:
+ - Disable adaptive ITR and lower Rx and Tx interrupts. The examples below
+ affect every queue of the specified interface.
+
+  - Setting rx-usecs and tx-usecs to 80 will limit interrupts to about
+    12,500 interrupts per second per queue (1,000,000 microseconds /
+    80 microseconds)::
+
+ # ethtool -C <ethX> adaptive-rx off adaptive-tx off rx-usecs 80
+ tx-usecs 80
+
+For reduced latency:
+ - Disable adaptive ITR and ITR by setting rx-usecs and tx-usecs to 0
+ using ethtool::
+
+ # ethtool -C <ethX> adaptive-rx off adaptive-tx off rx-usecs 0
+ tx-usecs 0
+
+Per-queue interrupt rate settings:
+ - The following examples are for queues 1 and 3, but you can adjust other
+ queues.
+
+ - To disable Rx adaptive ITR and set static Rx ITR to 10 microseconds or
+ about 100,000 interrupts/second, for queues 1 and 3::
+
+ # ethtool --per-queue <ethX> queue_mask 0xa --coalesce adaptive-rx off
+ rx-usecs 10
+
+ - To show the current coalesce settings for queues 1 and 3::
+
+ # ethtool --per-queue <ethX> queue_mask 0xa --show-coalesce
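+
+.. note::
+    The queue_mask value is a bitmap of queue numbers: 0xa is binary
+    1010, which selects queues 1 and 3.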
+
+
+
+Virtualized Environments
+------------------------
+In addition to the other suggestions in this section, the following may be
+helpful to optimize performance in VMs.
+
+ - Using the appropriate mechanism (vcpupin) in the VM, pin the CPUs to
+ individual LCPUs, making sure to use a set of CPUs included in the
+ device's local_cpulist: /sys/class/net/<ethX>/device/local_cpulist.
+
+ - Configure as many Rx/Tx queues in the VM as available. (See the idpf driver
+ documentation for the number of queues supported.) For example::
+
+ # ethtool -L <virt_interface> rx <max> tx <max>
+
+
+Support
+=======
+For general information, go to the Intel support website at:
+http://www.intel.com/support/
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to intel-wired-lan@lists.osuosl.org.
+
+
+Trademarks
+==========
+Intel is a trademark or registered trademark of Intel Corporation or its
+subsidiaries in the United States and/or other countries.
+
+* Other names and brands may be claimed as the property of others.
diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst
index 601eacaf12f3..2f0285a5bc80 100644
--- a/Documentation/networking/device_drivers/index.rst
+++ b/Documentation/networking/device_drivers/index.rst
@@ -8,7 +8,6 @@ Contents:
.. toctree::
:maxdepth: 2
- appletalk/index
atm/index
cable/index
can/index
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index a66054d0763a..f7dfde3b09a9 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -745,6 +745,13 @@ tcp_comp_sack_nr - INTEGER
Default : 44
+tcp_backlog_ack_defer - BOOLEAN
+	If set, a user thread processing the socket backlog tries to send
+	one ACK for the whole queue. This helps to avoid potential long
+	latencies at the end of a TCP socket syscall.
+
+ Default : true
+
tcp_slow_start_after_idle - BOOLEAN
If set, provide RFC2861 behavior and time out the congestion
window after an idle period. An idle period is defined at
@@ -2304,6 +2311,17 @@ accept_ra_pinfo - BOOLEAN
- enabled if accept_ra is enabled.
- disabled if accept_ra is disabled.
+ra_honor_pio_life - BOOLEAN
+	Whether to ignore RFC4862 Section 5.5.3e and always honor the
+	valid lifetime of an address matching a prefix sent in a Router
+	Advertisement Prefix Information Option.
+
+ - If enabled, the PIO valid lifetime will always be honored.
+ - If disabled, RFC4862 section 5.5.3e is used to determine
+ the valid lifetime of the address.
+
+ Default: 0 (disabled)
+
accept_ra_rt_info_min_plen - INTEGER
Minimum prefix length of Route Information in RA.
diff --git a/Documentation/networking/pktgen.rst b/Documentation/networking/pktgen.rst
index 1225f0f63ff0..c945218946e1 100644
--- a/Documentation/networking/pktgen.rst
+++ b/Documentation/networking/pktgen.rst
@@ -178,6 +178,7 @@ Examples::
IPSEC # IPsec encapsulation (needs CONFIG_XFRM)
NODE_ALLOC # node specific memory allocation
NO_TIMESTAMP # disable timestamping
+ SHARED # enable shared SKB
pgset 'flag ![name]' Clear a flag to determine behaviour.
Note that you might need to use single quote in
interactive mode, so that your shell wouldn't expand
@@ -288,6 +289,16 @@ To avoid breaking existing testbed scripts for using AH type and tunnel mode,
you can use "pgset spi SPI_VALUE" to specify which transformation mode
to employ.
+Disable shared SKB
+==================
+By default, SKBs sent by pktgen are shared (user count > 1).
+To test with non-shared SKBs, remove the "SHARED" flag by simply setting::
+
+ pg_set "flag !SHARED"
+
+However, if the "clone_skb" or "burst" parameters are configured, the skb
+still needs to be held by pktgen for further access. Hence the skb must be
+shared.
Current commands and configuration options
==========================================
@@ -357,6 +368,7 @@ Current commands and configuration options
IPSEC
NODE_ALLOC
NO_TIMESTAMP
+ SHARED
spi (ipsec)
diff --git a/Documentation/networking/xdp-rx-metadata.rst b/Documentation/networking/xdp-rx-metadata.rst
index 25ce72af81c2..205696780b78 100644
--- a/Documentation/networking/xdp-rx-metadata.rst
+++ b/Documentation/networking/xdp-rx-metadata.rst
@@ -105,6 +105,13 @@ bpf_tail_call
Adding programs that access metadata kfuncs to the ``BPF_MAP_TYPE_PROG_ARRAY``
is currently not supported.
+Supported Devices
+=================
+
+It is possible to query which kfuncs a particular netdev implements via
+netlink. See the ``xdp-rx-metadata-features`` attribute in
+``Documentation/netlink/specs/netdev.yaml``.
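+
+For example, the feature mask of the netdev with ifindex 1 could be
+queried with the YNL CLI from the kernel tree (a sketch)::
+
+  $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
+        --do dev-get --json '{"ifindex": 1}'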
+
Example
=======
diff --git a/MAINTAINERS b/MAINTAINERS
index 81d5fc0bba68..d9f6abd77fc7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3595,9 +3595,10 @@ F: Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml
F: drivers/iio/accel/bma400*
BPF JIT for ARM
-M: Shubham Bansal <illusionist.neo@gmail.com>
+M: Russell King <linux@armlinux.org.uk>
+M: Puranjay Mohan <puranjay12@gmail.com>
L: bpf@vger.kernel.org
-S: Odd Fixes
+S: Maintained
F: arch/arm/net/
BPF JIT for ARM64
@@ -4319,8 +4320,7 @@ F: drivers/net/ethernet/broadcom/bcmsysport.*
F: drivers/net/ethernet/broadcom/unimac.h
BROADCOM TG3 GIGABIT ETHERNET DRIVER
-M: Siva Reddy Kallam <siva.kallam@broadcom.com>
-M: Prashant Sreedharan <prashant@broadcom.com>
+M: Pavan Chebbi <pavan.chebbi@broadcom.com>
M: Michael Chan <mchan@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
@@ -6333,6 +6333,17 @@ F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-drive
F: drivers/net/ethernet/freescale/dpaa2/dpaa2-switch*
F: drivers/net/ethernet/freescale/dpaa2/dpsw*
+DPLL SUBSYSTEM
+M: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+M: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
+M: Jiri Pirko <jiri@resnulli.us>
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/driver-api/dpll.rst
+F: drivers/dpll/*
+F: include/linux/dpll.h
+F: include/uapi/linux/dpll.h
+
DRBD DRIVER
M: Philipp Reisner <philipp.reisner@linbit.com>
M: Lars Ellenberg <lars.ellenberg@linbit.com>
@@ -14344,9 +14355,11 @@ MIPS/LOONGSON1 ARCHITECTURE
M: Keguang Zhang <keguang.zhang@gmail.com>
L: linux-mips@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/*/loongson,ls1*.yaml
F: arch/mips/include/asm/mach-loongson32/
F: arch/mips/loongson32/
F: drivers/*/*loongson1*
+F: drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
MIPS/LOONGSON2EF ARCHITECTURE
M: Jiaxun Yang <jiaxun.yang@flygoat.com>
@@ -23679,6 +23692,11 @@ F: Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
F: drivers/gpio/gpio-xilinx.c
F: drivers/gpio/gpio-zynq.c
+XILINX LL TEMAC ETHERNET DRIVER
+L: netdev@vger.kernel.org
+S: Obsolete
+F: drivers/net/ethernet/xilinx/ll_temac*
+
XILINX PWM DRIVER
M: Sean Anderson <sean.anderson@seco.com>
S: Maintained
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6a1c9fca5260..1d672457d02f 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -2,6 +2,7 @@
/*
* Just-In-Time compiler for eBPF filters on 32bit ARM
*
+ * Copyright (c) 2023 Puranjay Mohan <puranjay12@gmail.com>
* Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
* Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
*/
@@ -15,6 +16,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
+#include <linux/math64.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
@@ -228,6 +230,44 @@ static u32 jit_mod32(u32 dividend, u32 divisor)
return dividend % divisor;
}
+static s32 jit_sdiv32(s32 dividend, s32 divisor)
+{
+ return dividend / divisor;
+}
+
+static s32 jit_smod32(s32 dividend, s32 divisor)
+{
+ return dividend % divisor;
+}
+
+/* Wrappers for 64-bit div/mod */
+static u64 jit_udiv64(u64 dividend, u64 divisor)
+{
+ return div64_u64(dividend, divisor);
+}
+
+static u64 jit_mod64(u64 dividend, u64 divisor)
+{
+ u64 rem;
+
+ div64_u64_rem(dividend, divisor, &rem);
+ return rem;
+}
+
+static s64 jit_sdiv64(s64 dividend, s64 divisor)
+{
+ return div64_s64(dividend, divisor);
+}
+
+static s64 jit_smod64(s64 dividend, s64 divisor)
+{
+ u64 q;
+
+ q = div64_s64(dividend, divisor);
+
+ return dividend - q * divisor;
+}
+
static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
inst |= (cond << 28);
@@ -333,6 +373,9 @@ static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
#define ARM_LDRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
#define ARM_LDRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)
+#define ARM_LDRSH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRSH_I, rt, rn, off)
+#define ARM_LDRSB_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRSB_I, rt, rn, off)
+
#define ARM_STR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
#define ARM_STRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
#define ARM_STRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
@@ -474,17 +517,18 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
return to - from - 2;
}
-static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
+static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op, u8 sign)
{
const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
const s8 *tmp = bpf2a32[TMP_REG_1];
+ u32 dst;
#if __LINUX_ARM_ARCH__ == 7
if (elf_hwcap & HWCAP_IDIVA) {
- if (op == BPF_DIV)
- emit(ARM_UDIV(rd, rm, rn), ctx);
- else {
- emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
+ if (op == BPF_DIV) {
+ emit(sign ? ARM_SDIV(rd, rm, rn) : ARM_UDIV(rd, rm, rn), ctx);
+ } else {
+ emit(sign ? ARM_SDIV(ARM_IP, rm, rn) : ARM_UDIV(ARM_IP, rm, rn), ctx);
emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
}
return;
@@ -512,8 +556,19 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);
/* Call appropriate function */
- emit_mov_i(ARM_IP, op == BPF_DIV ?
- (u32)jit_udiv32 : (u32)jit_mod32, ctx);
+ if (sign) {
+ if (op == BPF_DIV)
+ dst = (u32)jit_sdiv32;
+ else
+ dst = (u32)jit_smod32;
+ } else {
+ if (op == BPF_DIV)
+ dst = (u32)jit_udiv32;
+ else
+ dst = (u32)jit_mod32;
+ }
+
+ emit_mov_i(ARM_IP, dst, ctx);
emit_blx_r(ARM_IP, ctx);
/* Restore caller-saved registers from stack */
@@ -530,6 +585,78 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}
+static inline void emit_udivmod64(const s8 *rd, const s8 *rm, const s8 *rn, struct jit_ctx *ctx,
+ u8 op, u8 sign)
+{
+ u32 dst;
+
+ /* Push caller-saved registers on stack */
+ emit(ARM_PUSH(CALLER_MASK), ctx);
+
+ /*
+	 * As we are implementing 64-bit div/mod as function calls, we need to put the dividend in
+ * R0-R1 and the divisor in R2-R3. As we have already pushed these registers on the stack,
+ * we can recover them later after returning from the function call.
+ */
+ if (rm[1] != ARM_R0 || rn[1] != ARM_R2) {
+ /*
+ * Move Rm to {R1, R0} if it is not already there.
+ */
+ if (rm[1] != ARM_R0) {
+ if (rn[1] == ARM_R0)
+ emit(ARM_PUSH(BIT(ARM_R0) | BIT(ARM_R1)), ctx);
+ emit(ARM_MOV_R(ARM_R1, rm[0]), ctx);
+ emit(ARM_MOV_R(ARM_R0, rm[1]), ctx);
+ if (rn[1] == ARM_R0) {
+ emit(ARM_POP(BIT(ARM_R2) | BIT(ARM_R3)), ctx);
+ goto cont;
+ }
+ }
+ /*
+ * Move Rn to {R3, R2} if it is not already there.
+ */
+ if (rn[1] != ARM_R2) {
+ emit(ARM_MOV_R(ARM_R3, rn[0]), ctx);
+ emit(ARM_MOV_R(ARM_R2, rn[1]), ctx);
+ }
+ }
+
+cont:
+
+ /* Call appropriate function */
+ if (sign) {
+ if (op == BPF_DIV)
+ dst = (u32)jit_sdiv64;
+ else
+ dst = (u32)jit_smod64;
+ } else {
+ if (op == BPF_DIV)
+ dst = (u32)jit_udiv64;
+ else
+ dst = (u32)jit_mod64;
+ }
+
+ emit_mov_i(ARM_IP, dst, ctx);
+ emit_blx_r(ARM_IP, ctx);
+
+ /* Save return value */
+ if (rd[1] != ARM_R0) {
+ emit(ARM_MOV_R(rd[0], ARM_R1), ctx);
+ emit(ARM_MOV_R(rd[1], ARM_R0), ctx);
+ }
+
+ /* Recover {R3, R2} and {R1, R0} from stack if they are not Rd */
+ if (rd[1] != ARM_R0 && rd[1] != ARM_R2) {
+ emit(ARM_POP(CALLER_MASK), ctx);
+ } else if (rd[1] != ARM_R0) {
+ emit(ARM_POP(BIT(ARM_R0) | BIT(ARM_R1)), ctx);
+ emit(ARM_ADD_I(ARM_SP, ARM_SP, 8), ctx);
+ } else {
+ emit(ARM_ADD_I(ARM_SP, ARM_SP, 8), ctx);
+ emit(ARM_POP(BIT(ARM_R2) | BIT(ARM_R3)), ctx);
+ }
+}
+
/* Is the translated BPF register on stack? */
static bool is_stacked(s8 reg)
{
@@ -744,12 +871,16 @@ static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
}
/* dst = src (4 bytes)*/
-static inline void emit_a32_mov_r(const s8 dst, const s8 src,
+static inline void emit_a32_mov_r(const s8 dst, const s8 src, const u8 off,
struct jit_ctx *ctx) {
const s8 *tmp = bpf2a32[TMP_REG_1];
s8 rt;
rt = arm_bpf_get_reg32(src, tmp[0], ctx);
+ if (off && off != 32) {
+ emit(ARM_LSL_I(rt, rt, 32 - off), ctx);
+ emit(ARM_ASR_I(rt, rt, 32 - off), ctx);
+ }
arm_bpf_put_reg32(dst, rt, ctx);
}
@@ -758,15 +889,15 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
const s8 src[],
struct jit_ctx *ctx) {
if (!is64) {
- emit_a32_mov_r(dst_lo, src_lo, ctx);
+ emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
if (!ctx->prog->aux->verifier_zext)
/* Zero out high 4 bytes */
emit_a32_mov_i(dst_hi, 0, ctx);
} else if (__LINUX_ARM_ARCH__ < 6 &&
ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
/* complete 8 byte move */
- emit_a32_mov_r(dst_lo, src_lo, ctx);
- emit_a32_mov_r(dst_hi, src_hi, ctx);
+ emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
+ emit_a32_mov_r(dst_hi, src_hi, 0, ctx);
} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
const u8 *tmp = bpf2a32[TMP_REG_1];
@@ -782,6 +913,24 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
}
}
+/* dst = (signed)src */
+static inline void emit_a32_movsx_r64(const bool is64, const u8 off, const s8 dst[], const s8 src[],
+ struct jit_ctx *ctx) {
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *rt;
+
+ rt = arm_bpf_get_reg64(dst, tmp, ctx);
+
+ emit_a32_mov_r(dst_lo, src_lo, off, ctx);
+ if (!is64) {
+ if (!ctx->prog->aux->verifier_zext)
+ /* Zero out high 4 bytes */
+ emit_a32_mov_i(dst_hi, 0, ctx);
+ } else {
+ emit(ARM_ASR_I(rt[0], rt[1], 31), ctx);
+ }
+}
+
/* Shift operations */
static inline void emit_a32_alu_i(const s8 dst, const u32 val,
struct jit_ctx *ctx, const u8 op) {
@@ -1026,6 +1175,24 @@ static bool is_ldst_imm(s16 off, const u8 size)
return -off_max <= off && off <= off_max;
}
+static bool is_ldst_imm8(s16 off, const u8 size)
+{
+ s16 off_max = 0;
+
+ switch (size) {
+ case BPF_B:
+ off_max = 0xff;
+ break;
+ case BPF_W:
+ off_max = 0xfff;
+ break;
+ case BPF_H:
+ off_max = 0xff;
+ break;
+ }
+ return -off_max <= off && off <= off_max;
+}
+
/* *(size *)(dst + off) = src */
static inline void emit_str_r(const s8 dst, const s8 src[],
s16 off, struct jit_ctx *ctx, const u8 sz){
@@ -1105,6 +1272,50 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src,
arm_bpf_put_reg64(dst, rd, ctx);
}
+/* dst = *(signed size*)(src + off) */
+static inline void emit_ldsx_r(const s8 dst[], const s8 src,
+ s16 off, struct jit_ctx *ctx, const u8 sz){
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
+ s8 rm = src;
+ int add_off;
+
+ if (!is_ldst_imm8(off, sz)) {
+ /*
+ * offset does not fit in the load/store immediate,
+ * construct an ADD instruction to apply the offset.
+ */
+ add_off = imm8m(off);
+ if (add_off > 0) {
+ emit(ARM_ADD_I(tmp[0], src, add_off), ctx);
+ rm = tmp[0];
+ } else {
+ emit_a32_mov_i(tmp[0], off, ctx);
+ emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
+ rm = tmp[0];
+ }
+ off = 0;
+ }
+
+ switch (sz) {
+ case BPF_B:
+		/* Load a Byte with sign extension */
+ emit(ARM_LDRSB_I(rd[1], rm, off), ctx);
+ break;
+ case BPF_H:
+		/* Load a HalfWord with sign extension */
+ emit(ARM_LDRSH_I(rd[1], rm, off), ctx);
+ break;
+ case BPF_W:
+		/* Load a Word */
+ emit(ARM_LDR_I(rd[1], rm, off), ctx);
+ break;
+ }
+ /* Carry the sign extension to upper 32 bits */
+ emit(ARM_ASR_I(rd[0], rd[1], 31), ctx);
+ arm_bpf_put_reg64(dst, rd, ctx);
+}
+
/* Arithmatic Operation */
static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
const u8 rn, struct jit_ctx *ctx, u8 op,
@@ -1385,7 +1596,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
emit_a32_mov_i(dst_hi, 0, ctx);
break;
}
- emit_a32_mov_r64(is64, dst, src, ctx);
+ if (insn->off)
+ emit_a32_movsx_r64(is64, insn->off, dst, src, ctx);
+ else
+ emit_a32_mov_r64(is64, dst, src, ctx);
break;
case BPF_K:
/* Sign-extend immediate value to destination reg */
@@ -1461,7 +1675,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
rt = src_lo;
break;
}
- emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
+ emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code), off);
arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
if (!ctx->prog->aux->verifier_zext)
emit_a32_mov_i(dst_hi, 0, ctx);
@@ -1470,7 +1684,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_ALU64 | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_X:
- goto notyet;
+ rd = arm_bpf_get_reg64(dst, tmp2, ctx);
+ switch (BPF_SRC(code)) {
+ case BPF_X:
+ rs = arm_bpf_get_reg64(src, tmp, ctx);
+ break;
+ case BPF_K:
+ rs = tmp;
+ emit_a32_mov_se_i64(is64, rs, imm, ctx);
+ break;
+ }
+ emit_udivmod64(rd, rd, rs, ctx, BPF_OP(code), off);
+ arm_bpf_put_reg64(dst, rd, ctx);
+ break;
/* dst = dst << imm */
/* dst = dst >> imm */
/* dst = dst >> imm (signed) */
@@ -1545,10 +1771,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
break;
/* dst = htole(dst) */
/* dst = htobe(dst) */
- case BPF_ALU | BPF_END | BPF_FROM_LE:
- case BPF_ALU | BPF_END | BPF_FROM_BE:
+ case BPF_ALU | BPF_END | BPF_FROM_LE: /* also BPF_TO_LE */
+ case BPF_ALU | BPF_END | BPF_FROM_BE: /* also BPF_TO_BE */
+ /* dst = bswap(dst) */
+ case BPF_ALU64 | BPF_END | BPF_FROM_LE: /* also BPF_TO_LE */
rd = arm_bpf_get_reg64(dst, tmp, ctx);
- if (BPF_SRC(code) == BPF_FROM_LE)
+ if (BPF_SRC(code) == BPF_FROM_LE && BPF_CLASS(code) != BPF_ALU64)
goto emit_bswap_uxt;
switch (imm) {
case 16:
@@ -1603,8 +1831,15 @@ exit:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_DW:
+ /* LDSX: dst = *(signed size *)(src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_B:
+ case BPF_LDX | BPF_MEMSX | BPF_H:
+ case BPF_LDX | BPF_MEMSX | BPF_W:
rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
- emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
+ if (BPF_MODE(insn->code) == BPF_MEMSX)
+ emit_ldsx_r(dst, rn, off, ctx, BPF_SIZE(code));
+ else
+ emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
@@ -1761,10 +1996,15 @@ go_jmp:
break;
/* JMP OFF */
case BPF_JMP | BPF_JA:
+ case BPF_JMP32 | BPF_JA:
{
- if (off == 0)
+ if (BPF_CLASS(code) == BPF_JMP32 && imm != 0)
+ jmp_offset = bpf2a32_offset(i + imm, i, ctx);
+ else if (BPF_CLASS(code) == BPF_JMP && off != 0)
+ jmp_offset = bpf2a32_offset(i + off, i, ctx);
+ else
break;
- jmp_offset = bpf2a32_offset(i+off, i, ctx);
+
check_imm24(jmp_offset);
emit(ARM_B(jmp_offset), ctx);
break;
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index e0b593a1498d..438f0e1f91a0 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -79,9 +79,11 @@
#define ARM_INST_LDST__IMM12 0x00000fff
#define ARM_INST_LDRB_I 0x05500000
#define ARM_INST_LDRB_R 0x07d00000
+#define ARM_INST_LDRSB_I 0x015000d0
#define ARM_INST_LDRD_I 0x014000d0
#define ARM_INST_LDRH_I 0x015000b0
#define ARM_INST_LDRH_R 0x019000b0
+#define ARM_INST_LDRSH_I 0x015000f0
#define ARM_INST_LDR_I 0x05100000
#define ARM_INST_LDR_R 0x07900000
@@ -137,6 +139,7 @@
#define ARM_INST_TST_I 0x03100000
#define ARM_INST_UDIV 0x0730f010
+#define ARM_INST_SDIV 0x0710f010
#define ARM_INST_UMULL 0x00800090
@@ -265,6 +268,7 @@
#define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm)
#define ARM_UDIV(rd, rn, rm) (ARM_INST_UDIV | (rd) << 16 | (rn) | (rm) << 8)
+#define ARM_SDIV(rd, rn, rm) (ARM_INST_SDIV | (rd) << 16 | (rn) | (rm) << 8)
#define ARM_UMULL(rd_lo, rd_hi, rn, rm) (ARM_INST_UMULL | (rd_hi) << 16 \
| (rd_lo) << 12 | (rm) << 8 | rn)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 150d1c6543f7..7d4af64e3982 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -288,7 +288,7 @@ static bool is_lsi_offset(int offset, int scale)
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
const struct bpf_prog *prog = ctx->prog;
- const bool is_main_prog = prog->aux->func_idx == 0;
+ const bool is_main_prog = !bpf_is_subprog(prog);
const u8 r6 = bpf2a64[BPF_REG_6];
const u8 r7 = bpf2a64[BPF_REG_7];
const u8 r8 = bpf2a64[BPF_REG_8];
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 2861e3360aff..e6a643f63ebf 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -556,7 +556,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
jit->prologue_plt_ret = jit->prg;
- if (fp->aux->func_idx == 0) {
+ if (!bpf_is_subprog(fp)) {
/* Initialize the tail call counter in the main program. */
/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a5930042139d..8c10d9abc239 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -16,6 +16,9 @@
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
+#include <asm/unwind.h>
+
+static bool all_callee_regs_used[4] = {true, true, true, true};
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
@@ -255,6 +258,14 @@ struct jit_context {
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE)
+static void push_r12(u8 **pprog)
+{
+ u8 *prog = *pprog;
+
+ EMIT2(0x41, 0x54); /* push r12 */
+ *pprog = prog;
+}
+
static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
u8 *prog = *pprog;
@@ -270,6 +281,14 @@ static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
*pprog = prog;
}
+static void pop_r12(u8 **pprog)
+{
+ u8 *prog = *pprog;
+
+ EMIT2(0x41, 0x5C); /* pop r12 */
+ *pprog = prog;
+}
+
static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
u8 *prog = *pprog;
@@ -291,7 +310,8 @@ static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
* while jumping to another program
*/
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
- bool tail_call_reachable, bool is_subprog)
+ bool tail_call_reachable, bool is_subprog,
+ bool is_exception_cb)
{
u8 *prog = *pprog;
@@ -303,12 +323,30 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
prog += X86_PATCH_SIZE;
if (!ebpf_from_cbpf) {
if (tail_call_reachable && !is_subprog)
+ /* When it's the entry of the whole tailcall context,
+ * zeroing rax means initialising tail_call_cnt.
+ */
EMIT2(0x31, 0xC0); /* xor eax, eax */
else
+ /* Keep the same instruction layout. */
EMIT2(0x66, 0x90); /* nop2 */
}
- EMIT1(0x55); /* push rbp */
- EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
+ /* Exception callback receives FP as third parameter */
+ if (is_exception_cb) {
+ EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
+ EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
+ /* The main frame must have exception_boundary as true, so we
+ * first restore those callee-saved regs from stack, before
+ * reusing the stack frame.
+ */
+ pop_callee_regs(&prog, all_callee_regs_used);
+ pop_r12(&prog);
+ /* Reset the stack frame. */
+ EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
+ } else {
+ EMIT1(0x55); /* push rbp */
+ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
+ }
/* X86_TAIL_CALL_OFFSET is here */
EMIT_ENDBR();
@@ -467,7 +505,8 @@ static void emit_return(u8 **pprog, u8 *ip)
* goto *(prog->bpf_func + prologue_size);
* out:
*/
-static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
+static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
+ u8 **pprog, bool *callee_regs_used,
u32 stack_depth, u8 *ip,
struct jit_context *ctx)
{
@@ -517,7 +556,12 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
offset = ctx->tail_call_indirect_label - (prog + 2 - start);
EMIT2(X86_JE, offset); /* je out */
- pop_callee_regs(&prog, callee_regs_used);
+ if (bpf_prog->aux->exception_boundary) {
+ pop_callee_regs(&prog, all_callee_regs_used);
+ pop_r12(&prog);
+ } else {
+ pop_callee_regs(&prog, callee_regs_used);
+ }
EMIT1(0x58); /* pop rax */
if (stack_depth)
@@ -541,7 +585,8 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
*pprog = prog;
}
-static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
+static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
+ struct bpf_jit_poke_descriptor *poke,
u8 **pprog, u8 *ip,
bool *callee_regs_used, u32 stack_depth,
struct jit_context *ctx)
@@ -570,7 +615,13 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
poke->tailcall_bypass);
- pop_callee_regs(&prog, callee_regs_used);
+ if (bpf_prog->aux->exception_boundary) {
+ pop_callee_regs(&prog, all_callee_regs_used);
+ pop_r12(&prog);
+ } else {
+ pop_callee_regs(&prog, callee_regs_used);
+ }
+
EMIT1(0x58); /* pop rax */
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
@@ -1018,6 +1069,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
+/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+#define RESTORE_TAIL_CALL_CNT(stack) \
+ EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
+
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
@@ -1041,8 +1096,20 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
emit_prologue(&prog, bpf_prog->aux->stack_depth,
bpf_prog_was_classic(bpf_prog), tail_call_reachable,
- bpf_prog->aux->func_idx != 0);
- push_callee_regs(&prog, callee_regs_used);
+ bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
+ /* Exception callback will clobber callee regs for its own use, and
+ * restore the original callee regs from main prog's stack frame.
+ */
+ if (bpf_prog->aux->exception_boundary) {
+ /* We also need to save r12, which is not mapped to any BPF
+ * register, as we throw after entry into the kernel, which may
+ * overwrite r12.
+ */
+ push_r12(&prog);
+ push_callee_regs(&prog, all_callee_regs_used);
+ } else {
+ push_callee_regs(&prog, callee_regs_used);
+ }
ilen = prog - temp;
if (rw_image)
@@ -1623,9 +1690,7 @@ st: if (is_imm8(insn->off))
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
- /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
- EMIT3_off32(0x48, 0x8B, 0x85,
- -round_up(bpf_prog->aux->stack_depth, 8) - 8);
+ RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
if (!imm32)
return -EINVAL;
offs = 7 + x86_call_depth_emit_accounting(&prog, func);
@@ -1641,13 +1706,15 @@ st: if (is_imm8(insn->off))
case BPF_JMP | BPF_TAIL_CALL:
if (imm32)
- emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
+ emit_bpf_tail_call_direct(bpf_prog,
+ &bpf_prog->aux->poke_tab[imm32 - 1],
&prog, image + addrs[i - 1],
callee_regs_used,
bpf_prog->aux->stack_depth,
ctx);
else
- emit_bpf_tail_call_indirect(&prog,
+ emit_bpf_tail_call_indirect(bpf_prog,
+ &prog,
callee_regs_used,
bpf_prog->aux->stack_depth,
image + addrs[i - 1],
@@ -1900,7 +1967,12 @@ emit_jmp:
seen_exit = true;
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
- pop_callee_regs(&prog, callee_regs_used);
+ if (bpf_prog->aux->exception_boundary) {
+ pop_callee_regs(&prog, all_callee_regs_used);
+ pop_r12(&prog);
+ } else {
+ pop_callee_regs(&prog, callee_regs_used);
+ }
EMIT1(0xC9); /* leave */
emit_return(&prog, image + addrs[i - 1] + (prog - temp));
break;
@@ -2400,6 +2472,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
* [ ... ]
* [ stack_arg2 ]
* RBP - arg_stack_off [ stack_arg1 ]
+ * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
*/
/* room for return value of orig_call or fentry prog */
@@ -2464,6 +2537,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
else
/* sub rsp, stack_size */
EMIT4(0x48, 0x83, 0xEC, stack_size);
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ EMIT1(0x50); /* push rax */
/* mov QWORD PTR [rbp - rbx_off], rbx */
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
@@ -2516,9 +2591,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
restore_regs(m, &prog, regs_off);
save_args(m, &prog, arg_stack_off, true);
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ /* Before calling the original function, restore the
+ * tail_call_cnt from stack to rax.
+ */
+ RESTORE_TAIL_CALL_CNT(stack_size);
+
if (flags & BPF_TRAMP_F_ORIG_STACK) {
- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
- EMIT2(0xff, 0xd0); /* call *rax */
+ emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
+ EMIT2(0xff, 0xd3); /* call *rbx */
} else {
/* call original function */
if (emit_rsb_call(&prog, orig_call, prog)) {
@@ -2569,7 +2650,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
ret = -EINVAL;
goto cleanup;
}
- }
+ } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ /* Before running the original function, restore the
+ * tail_call_cnt from stack to rax.
+ */
+ RESTORE_TAIL_CALL_CNT(stack_size);
+
/* restore return value of orig_call or fentry prog back into RAX */
if (save_ret)
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
@@ -2913,3 +2999,29 @@ void bpf_jit_free(struct bpf_prog *prog)
bpf_prog_unlock_free(prog);
}
+
+bool bpf_jit_supports_exceptions(void)
+{
+ /* We unwind through both kernel frames (starting from within bpf_throw
+ * call) and BPF frames. Therefore we require ORC unwinder to be enabled
+ * to walk kernel frames and reach BPF frames in the stack trace.
+ */
+ return IS_ENABLED(CONFIG_UNWINDER_ORC);
+}
+
+void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
+{
+#if defined(CONFIG_UNWINDER_ORC)
+ struct unwind_state state;
+ unsigned long addr;
+
+ for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
+ unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+ if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
+ break;
+ }
+ return;
+#endif
+ WARN(1, "verification of programs using bpf_throw should have failed\n");
+}
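/*
 * A minimal sketch of a consumer of arch_bpf_stack_walk(); hypothetical
 * code, not part of this patch. The walker invokes consume_fn once per
 * frame and stops early as soon as the callback returns false.
 */
struct walk_ctx {
	u64 ips[16];
	int nr;
};

static bool collect_frame(void *cookie, u64 ip, u64 sp, u64 bp)
{
	struct walk_ctx *ctx = cookie;

	if (ctx->nr >= ARRAY_SIZE(ctx->ips))
		return false;	/* buffer full, stop the walk */
	ctx->ips[ctx->nr++] = ip;
	return true;		/* keep walking */
}

/* usage: arch_bpf_stack_walk(collect_frame, &ctx); */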
diff --git a/drivers/Kconfig b/drivers/Kconfig
index efb66e25fa2d..8ba3e8b9ad72 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -243,4 +243,6 @@ source "drivers/hte/Kconfig"
source "drivers/cdx/Kconfig"
+source "drivers/dpll/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 1bec7819a837..722d15be0eb7 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -197,5 +197,6 @@ obj-$(CONFIG_PECI) += peci/
obj-$(CONFIG_HTE) += hte/
obj-$(CONFIG_DRM_ACCEL) += accel/
obj-$(CONFIG_CDX_BUS) += cdx/
+obj-$(CONFIG_DPLL) += dpll/
obj-$(CONFIG_S390) += s390/
diff --git a/drivers/dpll/Kconfig b/drivers/dpll/Kconfig
new file mode 100644
index 000000000000..a4cae73f20d3
--- /dev/null
+++ b/drivers/dpll/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Generic DPLL drivers configuration
+#
+
+config DPLL
+ bool
diff --git a/drivers/dpll/Makefile b/drivers/dpll/Makefile
new file mode 100644
index 000000000000..2e5b27850110
--- /dev/null
+++ b/drivers/dpll/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for DPLL drivers.
+#
+
+obj-$(CONFIG_DPLL) += dpll.o
+dpll-y += dpll_core.o
+dpll-y += dpll_netlink.o
+dpll-y += dpll_nl.o
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
new file mode 100644
index 000000000000..3568149b9562
--- /dev/null
+++ b/drivers/dpll/dpll_core.c
@@ -0,0 +1,798 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dpll_core.c - DPLL subsystem kernel-space interface implementation.
+ *
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
+ * Copyright (c) 2023 Intel Corporation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "dpll_core.h"
+#include "dpll_netlink.h"
+
+/* Mutex lock to protect DPLL subsystem devices and pins */
+DEFINE_MUTEX(dpll_lock);
+
+DEFINE_XARRAY_FLAGS(dpll_device_xa, XA_FLAGS_ALLOC);
+DEFINE_XARRAY_FLAGS(dpll_pin_xa, XA_FLAGS_ALLOC);
+
+static u32 dpll_xa_id;
+
+#define ASSERT_DPLL_REGISTERED(d) \
+ WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
+#define ASSERT_DPLL_NOT_REGISTERED(d) \
+ WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
+#define ASSERT_PIN_REGISTERED(p) \
+ WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
+
+struct dpll_device_registration {
+ struct list_head list;
+ const struct dpll_device_ops *ops;
+ void *priv;
+};
+
+struct dpll_pin_registration {
+ struct list_head list;
+ const struct dpll_pin_ops *ops;
+ void *priv;
+};
+
+struct dpll_device *dpll_device_get_by_id(int id)
+{
+ if (xa_get_mark(&dpll_device_xa, id, DPLL_REGISTERED))
+ return xa_load(&dpll_device_xa, id);
+
+ return NULL;
+}
+
+static struct dpll_pin_registration *
+dpll_pin_registration_find(struct dpll_pin_ref *ref,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ struct dpll_pin_registration *reg;
+
+ list_for_each_entry(reg, &ref->registration_list, list) {
+ if (reg->ops == ops && reg->priv == priv)
+ return reg;
+ }
+ return NULL;
+}
+
+static int
+dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ struct dpll_pin_registration *reg;
+ struct dpll_pin_ref *ref;
+ bool ref_exists = false;
+ unsigned long i;
+ int ret;
+
+ xa_for_each(xa_pins, i, ref) {
+ if (ref->pin != pin)
+ continue;
+ reg = dpll_pin_registration_find(ref, ops, priv);
+ if (reg) {
+ refcount_inc(&ref->refcount);
+ return 0;
+ }
+ ref_exists = true;
+ break;
+ }
+
+ if (!ref_exists) {
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return -ENOMEM;
+ ref->pin = pin;
+ INIT_LIST_HEAD(&ref->registration_list);
+ ret = xa_insert(xa_pins, pin->pin_idx, ref, GFP_KERNEL);
+ if (ret) {
+ kfree(ref);
+ return ret;
+ }
+ refcount_set(&ref->refcount, 1);
+ }
+
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg) {
+ if (!ref_exists) {
+ xa_erase(xa_pins, pin->pin_idx);
+ kfree(ref);
+ }
+ return -ENOMEM;
+ }
+ reg->ops = ops;
+ reg->priv = priv;
+ if (ref_exists)
+ refcount_inc(&ref->refcount);
+ list_add_tail(&reg->list, &ref->registration_list);
+
+ return 0;
+}
+
+static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ struct dpll_pin_registration *reg;
+ struct dpll_pin_ref *ref;
+ unsigned long i;
+
+ xa_for_each(xa_pins, i, ref) {
+ if (ref->pin != pin)
+ continue;
+ reg = dpll_pin_registration_find(ref, ops, priv);
+ if (WARN_ON(!reg))
+ return -EINVAL;
+ if (refcount_dec_and_test(&ref->refcount)) {
+ list_del(&reg->list);
+ kfree(reg);
+ xa_erase(xa_pins, i);
+ WARN_ON(!list_empty(&ref->registration_list));
+ kfree(ref);
+ }
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ struct dpll_pin_registration *reg;
+ struct dpll_pin_ref *ref;
+ bool ref_exists = false;
+ unsigned long i;
+ int ret;
+
+ xa_for_each(xa_dplls, i, ref) {
+ if (ref->dpll != dpll)
+ continue;
+ reg = dpll_pin_registration_find(ref, ops, priv);
+ if (reg) {
+ refcount_inc(&ref->refcount);
+ return 0;
+ }
+ ref_exists = true;
+ break;
+ }
+
+ if (!ref_exists) {
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return -ENOMEM;
+ ref->dpll = dpll;
+ INIT_LIST_HEAD(&ref->registration_list);
+ ret = xa_insert(xa_dplls, dpll->id, ref, GFP_KERNEL);
+ if (ret) {
+ kfree(ref);
+ return ret;
+ }
+ refcount_set(&ref->refcount, 1);
+ }
+
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg) {
+ if (!ref_exists) {
+ xa_erase(xa_dplls, dpll->id);
+ kfree(ref);
+ }
+ return -ENOMEM;
+ }
+ reg->ops = ops;
+ reg->priv = priv;
+ if (ref_exists)
+ refcount_inc(&ref->refcount);
+ list_add_tail(&reg->list, &ref->registration_list);
+
+ return 0;
+}
+
+static void
+dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ struct dpll_pin_registration *reg;
+ struct dpll_pin_ref *ref;
+ unsigned long i;
+
+ xa_for_each(xa_dplls, i, ref) {
+ if (ref->dpll != dpll)
+ continue;
+ reg = dpll_pin_registration_find(ref, ops, priv);
+ if (WARN_ON(!reg))
+ return;
+ if (refcount_dec_and_test(&ref->refcount)) {
+ list_del(&reg->list);
+ kfree(reg);
+ xa_erase(xa_dplls, i);
+ WARN_ON(!list_empty(&ref->registration_list));
+ kfree(ref);
+ }
+ return;
+ }
+}
+
+struct dpll_pin_ref *dpll_xa_ref_dpll_first(struct xarray *xa_refs)
+{
+ struct dpll_pin_ref *ref;
+ unsigned long i = 0;
+
+ ref = xa_find(xa_refs, &i, ULONG_MAX, XA_PRESENT);
+ WARN_ON(!ref);
+ return ref;
+}
+
+static struct dpll_device *
+dpll_device_alloc(const u64 clock_id, u32 device_idx, struct module *module)
+{
+ struct dpll_device *dpll;
+ int ret;
+
+ dpll = kzalloc(sizeof(*dpll), GFP_KERNEL);
+ if (!dpll)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&dpll->refcount, 1);
+ INIT_LIST_HEAD(&dpll->registration_list);
+ dpll->device_idx = device_idx;
+ dpll->clock_id = clock_id;
+ dpll->module = module;
+ ret = xa_alloc_cyclic(&dpll_device_xa, &dpll->id, dpll, xa_limit_32b,
+ &dpll_xa_id, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(dpll);
+ return ERR_PTR(ret);
+ }
+ xa_init_flags(&dpll->pin_refs, XA_FLAGS_ALLOC);
+
+ return dpll;
+}
+
+/**
+ * dpll_device_get - find existing or create new dpll device
+ * @clock_id: clock_id of creator
+ * @device_idx: idx given by device driver
+ * @module: reference to registering module
+ *
+ * Get the existing dpll device object unique for the given arguments,
+ * or create a new one if it does not exist yet.
+ *
+ * Context: Acquires a lock (dpll_lock)
+ * Return:
+ * * valid dpll_device struct pointer if succeeded
+ * * ERR_PTR(X) - error
+ */
+struct dpll_device *
+dpll_device_get(u64 clock_id, u32 device_idx, struct module *module)
+{
+ struct dpll_device *dpll, *ret = NULL;
+ unsigned long index;
+
+ mutex_lock(&dpll_lock);
+ xa_for_each(&dpll_device_xa, index, dpll) {
+ if (dpll->clock_id == clock_id &&
+ dpll->device_idx == device_idx &&
+ dpll->module == module) {
+ ret = dpll;
+ refcount_inc(&ret->refcount);
+ break;
+ }
+ }
+ if (!ret)
+ ret = dpll_device_alloc(clock_id, device_idx, module);
+ mutex_unlock(&dpll_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpll_device_get);
+
+/**
+ * dpll_device_put - decrease the refcount and free memory if possible
+ * @dpll: dpll_device struct pointer
+ *
+ * Context: Acquires a lock (dpll_lock)
+ * Drop a reference to a dpll device; once all references are gone, delete
+ * the dpll device object.
+ */
+void dpll_device_put(struct dpll_device *dpll)
+{
+ mutex_lock(&dpll_lock);
+ if (refcount_dec_and_test(&dpll->refcount)) {
+ ASSERT_DPLL_NOT_REGISTERED(dpll);
+ WARN_ON_ONCE(!xa_empty(&dpll->pin_refs));
+ xa_destroy(&dpll->pin_refs);
+ xa_erase(&dpll_device_xa, dpll->id);
+ WARN_ON(!list_empty(&dpll->registration_list));
+ kfree(dpll);
+ }
+ mutex_unlock(&dpll_lock);
+}
+EXPORT_SYMBOL_GPL(dpll_device_put);
+
+static struct dpll_device_registration *
+dpll_device_registration_find(struct dpll_device *dpll,
+ const struct dpll_device_ops *ops, void *priv)
+{
+ struct dpll_device_registration *reg;
+
+ list_for_each_entry(reg, &dpll->registration_list, list) {
+ if (reg->ops == ops && reg->priv == priv)
+ return reg;
+ }
+ return NULL;
+}
+
+/**
+ * dpll_device_register - register the dpll device in the subsystem
+ * @dpll: pointer to a dpll
+ * @type: type of a dpll
+ * @ops: ops for a dpll device
+ * @priv: pointer to private information of owner
+ *
+ * Make dpll device available for user space.
+ *
+ * Context: Acquires a lock (dpll_lock)
+ * Return:
+ * * 0 on success
+ * * negative - error value
+ */
+int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
+ const struct dpll_device_ops *ops, void *priv)
+{
+ struct dpll_device_registration *reg;
+ bool first_registration = false;
+
+ if (WARN_ON(!ops))
+ return -EINVAL;
+ if (WARN_ON(!ops->mode_get))
+ return -EINVAL;
+ if (WARN_ON(!ops->lock_status_get))
+ return -EINVAL;
+ if (WARN_ON(type < DPLL_TYPE_PPS || type > DPLL_TYPE_MAX))
+ return -EINVAL;
+
+ mutex_lock(&dpll_lock);
+ reg = dpll_device_registration_find(dpll, ops, priv);
+ if (reg) {
+ mutex_unlock(&dpll_lock);
+ return -EEXIST;
+ }
+
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg) {
+ mutex_unlock(&dpll_lock);
+ return -ENOMEM;
+ }
+ reg->ops = ops;
+ reg->priv = priv;
+ dpll->type = type;
+ first_registration = list_empty(&dpll->registration_list);
+ list_add_tail(&reg->list, &dpll->registration_list);
+ if (!first_registration) {
+ mutex_unlock(&dpll_lock);
+ return 0;
+ }
+
+ xa_set_mark(&dpll_device_xa, dpll->id, DPLL_REGISTERED);
+ dpll_device_create_ntf(dpll);
+ mutex_unlock(&dpll_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpll_device_register);
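/*
 * A hedged driver-side sketch of the sequence above; the my_* names and
 * the clock id are hypothetical, not part of this patch. dpll_device_get()
 * finds or creates the device object for (clock_id, idx, module), and
 * dpll_device_register() makes it visible to user space; teardown is the
 * mirrored dpll_device_unregister() plus dpll_device_put().
 */
static const struct dpll_device_ops my_dpll_ops = {
	.mode_get = my_mode_get,		/* required by the register call */
	.lock_status_get = my_lock_status_get,	/* required as well */
};

static int my_dpll_probe(struct my_priv *priv)
{
	priv->dpll = dpll_device_get(priv->clock_id, 0, THIS_MODULE);
	if (IS_ERR(priv->dpll))
		return PTR_ERR(priv->dpll);

	return dpll_device_register(priv->dpll, DPLL_TYPE_EEC,
				    &my_dpll_ops, priv);
}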
+
+/**
+ * dpll_device_unregister - unregister dpll device
+ * @dpll: registered dpll pointer
+ * @ops: ops for a dpll device
+ * @priv: pointer to private information of owner
+ *
+ * Unregister the device, making it unavailable to user space.
+ * Note: It does not free the memory
+ * Context: Acquires a lock (dpll_lock)
+ */
+void dpll_device_unregister(struct dpll_device *dpll,
+ const struct dpll_device_ops *ops, void *priv)
+{
+ struct dpll_device_registration *reg;
+
+ mutex_lock(&dpll_lock);
+ ASSERT_DPLL_REGISTERED(dpll);
+ dpll_device_delete_ntf(dpll);
+ reg = dpll_device_registration_find(dpll, ops, priv);
+ if (WARN_ON(!reg)) {
+ mutex_unlock(&dpll_lock);
+ return;
+ }
+ list_del(&reg->list);
+ kfree(reg);
+
+ if (!list_empty(&dpll->registration_list)) {
+ mutex_unlock(&dpll_lock);
+ return;
+ }
+ xa_clear_mark(&dpll_device_xa, dpll->id, DPLL_REGISTERED);
+ mutex_unlock(&dpll_lock);
+}
+EXPORT_SYMBOL_GPL(dpll_device_unregister);
+
+static struct dpll_pin *
+dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
+ const struct dpll_pin_properties *prop)
+{
+ struct dpll_pin *pin;
+ int ret;
+
+ pin = kzalloc(sizeof(*pin), GFP_KERNEL);
+ if (!pin)
+ return ERR_PTR(-ENOMEM);
+ pin->pin_idx = pin_idx;
+ pin->clock_id = clock_id;
+ pin->module = module;
+ if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX ||
+ prop->type > DPLL_PIN_TYPE_MAX)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ pin->prop = prop;
+ refcount_set(&pin->refcount, 1);
+ xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
+ xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
+ ret = xa_alloc(&dpll_pin_xa, &pin->id, pin, xa_limit_16b, GFP_KERNEL);
+ if (ret)
+ goto err;
+ return pin;
+err:
+ xa_destroy(&pin->dpll_refs);
+ xa_destroy(&pin->parent_refs);
+ kfree(pin);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dpll_pin_get - find existing or create new dpll pin
+ * @clock_id: clock_id of creator
+ * @pin_idx: idx given by dev driver
+ * @module: reference to registering module
+ * @prop: dpll pin properties
+ *
+ * Get the existing pin object unique for the given arguments, or create a
+ * new one if it does not exist yet.
+ *
+ * Context: Acquires a lock (dpll_lock)
+ * Return:
+ * * valid allocated dpll_pin struct pointer if succeeded
+ * * ERR_PTR(X) - error
+ */
+struct dpll_pin *
+dpll_pin_get(u64 clock_id, u32 pin_idx, struct module *module,
+ const struct dpll_pin_properties *prop)
+{
+ struct dpll_pin *pos, *ret = NULL;
+ unsigned long i;
+
+ mutex_lock(&dpll_lock);
+ xa_for_each(&dpll_pin_xa, i, pos) {
+ if (pos->clock_id == clock_id &&
+ pos->pin_idx == pin_idx &&
+ pos->module == module) {
+ ret = pos;
+ refcount_inc(&ret->refcount);
+ break;
+ }
+ }
+ if (!ret)
+ ret = dpll_pin_alloc(clock_id, pin_idx, module, prop);
+ mutex_unlock(&dpll_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpll_pin_get);
+
+/**
+ * dpll_pin_put - decrease the refcount and free memory if possible
+ * @pin: pointer to a pin to be put
+ *
+ * Drop a reference to a pin; once all references are gone, delete the pin
+ * object.
+ *
+ * Context: Acquires a lock (dpll_lock)
+ */
+void dpll_pin_put(struct dpll_pin *pin)
+{
+ mutex_lock(&dpll_lock);
+ if (refcount_dec_and_test(&pin->refcount)) {
+ xa_destroy(&pin->dpll_refs);
+ xa_destroy(&pin->parent_refs);
+ xa_erase(&dpll_pin_xa, pin->id);
+ kfree(pin);
+ }
+ mutex_unlock(&dpll_lock);
+}
+EXPORT_SYMBOL_GPL(dpll_pin_put);
+
+static int
+__dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ int ret;
+
+ ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv);
+ if (ret)
+ return ret;
+ ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv);
+ if (ret)
+ goto ref_pin_del;
+ xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
+ dpll_pin_create_ntf(pin);
+
+ return ret;
+
+ref_pin_del:
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
+ return ret;
+}
+
+/**
+ * dpll_pin_register - register the dpll pin in the subsystem
+ * @dpll: pointer to a dpll
+ * @pin: pointer to a dpll pin
+ * @ops: ops for a dpll pin
+ * @priv: pointer to private information of owner
+ *
+ * Context: Acquires a lock (dpll_lock)
+ * Return:
+ * * 0 on success
+ * * negative - error value
+ */
+int
+dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ int ret;
+
+ if (WARN_ON(!ops) ||
+ WARN_ON(!ops->state_on_dpll_get) ||
+ WARN_ON(!ops->direction_get))
+ return -EINVAL;
+ if (ASSERT_DPLL_REGISTERED(dpll))
+ return -EINVAL;
+
+ mutex_lock(&dpll_lock);
+ if (WARN_ON(!(dpll->module == pin->module &&
+ dpll->clock_id == pin->clock_id)))
+ ret = -EINVAL;
+ else
+ ret = __dpll_pin_register(dpll, pin, ops, priv);
+ mutex_unlock(&dpll_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpll_pin_register);
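/*
 * Hypothetical pin setup continuing the sketch above (the my_* names and
 * MY_PIN_IDX are assumptions). dpll_pin_register() checks that pin and
 * dpll share clock_id and module, so the pin must be obtained with the
 * same identifiers as the device it attaches to.
 */
static int my_pin_probe(struct my_priv *priv)
{
	priv->pin = dpll_pin_get(priv->clock_id, MY_PIN_IDX, THIS_MODULE,
				 &my_pin_prop);
	if (IS_ERR(priv->pin))
		return PTR_ERR(priv->pin);

	return dpll_pin_register(priv->dpll, priv->pin, &my_pin_ops, priv);
}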
+
+static void
+__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
+ dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
+ if (xa_empty(&pin->dpll_refs))
+ xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
+}
+
+/**
+ * dpll_pin_unregister - unregister dpll pin from dpll device
+ * @dpll: registered dpll pointer
+ * @pin: pointer to a pin
+ * @ops: ops for a dpll pin
+ * @priv: pointer to private information of owner
+ *
+ * Note: It does not free the memory
+ * Context: Acquires a lock (dpll_lock)
+ */
+void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ if (WARN_ON(xa_empty(&dpll->pin_refs)))
+ return;
+ if (WARN_ON(!xa_empty(&pin->parent_refs)))
+ return;
+
+ mutex_lock(&dpll_lock);
+ dpll_pin_delete_ntf(pin);
+ __dpll_pin_unregister(dpll, pin, ops, priv);
+ mutex_unlock(&dpll_lock);
+}
+EXPORT_SYMBOL_GPL(dpll_pin_unregister);
+
+/**
+ * dpll_pin_on_pin_register - register a pin with a parent pin
+ * @parent: pointer to a parent pin
+ * @pin: pointer to a pin
+ * @ops: ops for a dpll pin
+ * @priv: pointer to private information of owner
+ *
+ * Register a pin with a parent pin, creating references between them and
+ * between the newly registered pin and the dplls the parent pin is
+ * connected with.
+ *
+ * Context: Acquires a lock (dpll_lock)
+ * Return:
+ * * 0 on success
+ * * negative - error value
+ */
+int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ struct dpll_pin_ref *ref;
+ unsigned long i, stop;
+ int ret;
+
+ if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX))
+ return -EINVAL;
+
+ if (WARN_ON(!ops) ||
+ WARN_ON(!ops->state_on_pin_get) ||
+ WARN_ON(!ops->direction_get))
+ return -EINVAL;
+ if (ASSERT_PIN_REGISTERED(parent))
+ return -EINVAL;
+
+ mutex_lock(&dpll_lock);
+ ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
+ if (ret)
+ goto unlock;
+ refcount_inc(&pin->refcount);
+ xa_for_each(&parent->dpll_refs, i, ref) {
+ ret = __dpll_pin_register(ref->dpll, pin, ops, priv);
+ if (ret) {
+ stop = i;
+ goto dpll_unregister;
+ }
+ dpll_pin_create_ntf(pin);
+ }
+ mutex_unlock(&dpll_lock);
+
+ return ret;
+
+dpll_unregister:
+ xa_for_each(&parent->dpll_refs, i, ref)
+ if (i < stop) {
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ dpll_pin_delete_ntf(pin);
+ }
+ refcount_dec(&pin->refcount);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+unlock:
+ mutex_unlock(&dpll_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpll_pin_on_pin_register);
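/*
 * A sketch of building a MUX hierarchy (hypothetical names, not part of
 * this patch): the parent pin must have been created with type
 * DPLL_PIN_TYPE_MUX and already be registered; the child then becomes
 * reachable through every dpll the parent is connected with.
 */
static int my_mux_attach(struct my_priv *priv)
{
	/* priv->mux_pin was created with prop.type == DPLL_PIN_TYPE_MUX */
	return dpll_pin_on_pin_register(priv->mux_pin, priv->child_pin,
					&my_pin_ops, priv);
}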
+
+/**
+ * dpll_pin_on_pin_unregister - unregister dpll pin from a parent pin
+ * @parent: pointer to a parent pin
+ * @pin: pointer to a pin
+ * @ops: ops for a dpll pin
+ * @priv: pointer to private information of owner
+ *
+ * Context: Acquires a lock (dpll_lock)
+ * Note: It does not free the memory
+ */
+void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv)
+{
+ struct dpll_pin_ref *ref;
+ unsigned long i;
+
+ mutex_lock(&dpll_lock);
+ dpll_pin_delete_ntf(pin);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ refcount_dec(&pin->refcount);
+ xa_for_each(&pin->dpll_refs, i, ref)
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ mutex_unlock(&dpll_lock);
+}
+EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
+
+static struct dpll_device_registration *
+dpll_device_registration_first(struct dpll_device *dpll)
+{
+ struct dpll_device_registration *reg;
+
+ reg = list_first_entry_or_null((struct list_head *)&dpll->registration_list,
+ struct dpll_device_registration, list);
+ WARN_ON(!reg);
+ return reg;
+}
+
+void *dpll_priv(struct dpll_device *dpll)
+{
+ struct dpll_device_registration *reg;
+
+ reg = dpll_device_registration_first(dpll);
+ return reg->priv;
+}
+
+const struct dpll_device_ops *dpll_device_ops(struct dpll_device *dpll)
+{
+ struct dpll_device_registration *reg;
+
+ reg = dpll_device_registration_first(dpll);
+ return reg->ops;
+}
+
+static struct dpll_pin_registration *
+dpll_pin_registration_first(struct dpll_pin_ref *ref)
+{
+ struct dpll_pin_registration *reg;
+
+ reg = list_first_entry_or_null(&ref->registration_list,
+ struct dpll_pin_registration, list);
+ WARN_ON(!reg);
+ return reg;
+}
+
+void *dpll_pin_on_dpll_priv(struct dpll_device *dpll,
+ struct dpll_pin *pin)
+{
+ struct dpll_pin_registration *reg;
+ struct dpll_pin_ref *ref;
+
+ ref = xa_load(&dpll->pin_refs, pin->pin_idx);
+ if (!ref)
+ return NULL;
+ reg = dpll_pin_registration_first(ref);
+ return reg->priv;
+}
+
+void *dpll_pin_on_pin_priv(struct dpll_pin *parent,
+ struct dpll_pin *pin)
+{
+ struct dpll_pin_registration *reg;
+ struct dpll_pin_ref *ref;
+
+ ref = xa_load(&pin->parent_refs, parent->pin_idx);
+ if (!ref)
+ return NULL;
+ reg = dpll_pin_registration_first(ref);
+ return reg->priv;
+}
+
+const struct dpll_pin_ops *dpll_pin_ops(struct dpll_pin_ref *ref)
+{
+ struct dpll_pin_registration *reg;
+
+ reg = dpll_pin_registration_first(ref);
+ return reg->ops;
+}
+
+static int __init dpll_init(void)
+{
+ int ret;
+
+ ret = genl_register_family(&dpll_nl_family);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ mutex_destroy(&dpll_lock);
+ return ret;
+}
+
+static void __exit dpll_exit(void)
+{
+ genl_unregister_family(&dpll_nl_family);
+ mutex_destroy(&dpll_lock);
+}
+
+subsys_initcall(dpll_init);
+module_exit(dpll_exit);
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
new file mode 100644
index 000000000000..5585873c5c1b
--- /dev/null
+++ b/drivers/dpll/dpll_core.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
+ * Copyright (c) 2023 Intel and affiliates
+ */
+
+#ifndef __DPLL_CORE_H__
+#define __DPLL_CORE_H__
+
+#include <linux/dpll.h>
+#include <linux/list.h>
+#include <linux/refcount.h>
+#include "dpll_nl.h"
+
+#define DPLL_REGISTERED XA_MARK_1
+
+/**
+ * struct dpll_device - stores DPLL device internal data
+ * @id: unique id number for device given by dpll subsystem
+ * @device_idx: id given by dev driver
+ * @clock_id: unique identifier (clock_id) of a dpll
+ * @module: module of creator
+ * @type: type of a dpll
+ * @pin_refs: stores pins registered within a dpll
+ * @refcount: refcount
+ * @registration_list: list of registered ops and priv data of dpll owners
+ **/
+struct dpll_device {
+ u32 id;
+ u32 device_idx;
+ u64 clock_id;
+ struct module *module;
+ enum dpll_type type;
+ struct xarray pin_refs;
+ refcount_t refcount;
+ struct list_head registration_list;
+};
+
+/**
+ * struct dpll_pin - structure for a dpll pin
+ * @id: unique id number for pin given by dpll subsystem
+ * @pin_idx: index of a pin given by dev driver
+ * @clock_id: clock_id of creator
+ * @module: module of creator
+ * @dpll_refs: hold references to dplls the pin was registered with
+ * @parent_refs: hold references to parent pins the pin was registered with
+ * @prop: pointer to pin properties given by the registering driver
+ * @refcount: refcount
+ **/
+struct dpll_pin {
+ u32 id;
+ u32 pin_idx;
+ u64 clock_id;
+ struct module *module;
+ struct xarray dpll_refs;
+ struct xarray parent_refs;
+ const struct dpll_pin_properties *prop;
+ refcount_t refcount;
+};
+
+/**
+ * struct dpll_pin_ref - structure for referencing either dpll or pins
+ * @dpll: pointer to a dpll
+ * @pin: pointer to a pin
+ * @registration_list: list of ops and priv data registered with the ref
+ * @refcount: refcount
+ **/
+struct dpll_pin_ref {
+ union {
+ struct dpll_device *dpll;
+ struct dpll_pin *pin;
+ };
+ struct list_head registration_list;
+ refcount_t refcount;
+};
+
+void *dpll_priv(struct dpll_device *dpll);
+void *dpll_pin_on_dpll_priv(struct dpll_device *dpll, struct dpll_pin *pin);
+void *dpll_pin_on_pin_priv(struct dpll_pin *parent, struct dpll_pin *pin);
+
+const struct dpll_device_ops *dpll_device_ops(struct dpll_device *dpll);
+struct dpll_device *dpll_device_get_by_id(int id);
+const struct dpll_pin_ops *dpll_pin_ops(struct dpll_pin_ref *ref);
+struct dpll_pin_ref *dpll_xa_ref_dpll_first(struct xarray *xa_refs);
+extern struct xarray dpll_device_xa;
+extern struct xarray dpll_pin_xa;
+extern struct mutex dpll_lock;
+#endif
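/*
 * Which union member of struct dpll_pin_ref is valid depends on the xarray
 * holding the ref: entries in dpll->pin_refs and pin->parent_refs point at
 * pins, while entries in pin->dpll_refs point at dplls. A minimal sketch
 * of reading a pin's first dpll (hypothetical helper, not part of this
 * patch):
 */
static struct dpll_device *my_pin_first_dpll(struct dpll_pin *pin)
{
	struct dpll_pin_ref *ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);

	return ref ? ref->dpll : NULL;
}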
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
new file mode 100644
index 000000000000..e20daba6896a
--- /dev/null
+++ b/drivers/dpll/dpll_netlink.c
@@ -0,0 +1,1253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic netlink for DPLL management framework
+ *
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
+ * Copyright (c) 2023 Intel and affiliates
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <net/genetlink.h>
+#include "dpll_core.h"
+#include "dpll_netlink.h"
+#include "dpll_nl.h"
+#include <uapi/linux/dpll.h>
+
+#define ASSERT_NOT_NULL(ptr) (WARN_ON(!ptr))
+
+#define xa_for_each_marked_start(xa, index, entry, filter, start) \
+ for (index = start, entry = xa_find(xa, &index, ULONG_MAX, filter); \
+ entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
+
+struct dpll_dump_ctx {
+ unsigned long idx;
+};
+
+static struct dpll_dump_ctx *dpll_dump_context(struct netlink_callback *cb)
+{
+ return (struct dpll_dump_ctx *)cb->ctx;
+}
+
+static int
+dpll_msg_add_dev_handle(struct sk_buff *msg, struct dpll_device *dpll)
+{
+ if (nla_put_u32(msg, DPLL_A_ID, dpll->id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
+{
+ if (nla_put_u32(msg, DPLL_A_PIN_PARENT_ID, id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+/**
+ * dpll_msg_pin_handle_size - get size of pin handle attribute for given pin
+ * @pin: pin pointer
+ *
+ * Return: byte size of pin handle attribute for given pin.
+ */
+size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
+{
+ return pin ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
+}
+EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
+
+/**
+ * dpll_msg_add_pin_handle - attach pin handle attribute to a given message
+ * @msg: pointer to sk_buff message to attach a pin handle
+ * @pin: pin pointer
+ *
+ * Return:
+ * * 0 - success
+ * * -EMSGSIZE - no space in message to attach pin handle
+ */
+int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+{
+ if (!pin)
+ return 0;
+ if (nla_put_u32(msg, DPLL_A_PIN_ID, pin->id))
+ return -EMSGSIZE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpll_msg_add_pin_handle);
+
+static int
+dpll_msg_add_mode(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_mode mode;
+ int ret;
+
+ ret = ops->mode_get(dpll, dpll_priv(dpll), &mode, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_MODE, mode))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+dpll_msg_add_mode_supported(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_mode mode;
+
+ if (!ops->mode_supported)
+ return 0;
+ for (mode = DPLL_MODE_MANUAL; mode <= DPLL_MODE_MAX; mode++)
+ if (ops->mode_supported(dpll, dpll_priv(dpll), mode, extack))
+ if (nla_put_u32(msg, DPLL_A_MODE_SUPPORTED, mode))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_lock_status status;
+ int ret;
+
+ ret = ops->lock_status_get(dpll, dpll_priv(dpll), &status, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_LOCK_STATUS, status))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+dpll_msg_add_temp(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ s32 temp;
+ int ret;
+
+ if (!ops->temp_get)
+ return 0;
+ ret = ops->temp_get(dpll, dpll_priv(dpll), &temp, extack);
+ if (ret)
+ return ret;
+ if (nla_put_s32(msg, DPLL_A_TEMP, temp))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+dpll_msg_add_pin_prio(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *ref,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+ u32 prio;
+ int ret;
+
+ if (!ops->prio_get)
+ return 0;
+ ret = ops->prio_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
+ dpll_priv(dpll), &prio, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PIN_PRIO, prio))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+dpll_msg_add_pin_on_dpll_state(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *ref,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+ enum dpll_pin_state state;
+ int ret;
+
+ if (!ops->state_on_dpll_get)
+ return 0;
+ ret = ops->state_on_dpll_get(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ dpll, dpll_priv(dpll), &state, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PIN_STATE, state))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+dpll_msg_add_pin_direction(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *ref,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+ enum dpll_pin_direction direction;
+ int ret;
+
+ ret = ops->direction_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
+ dpll_priv(dpll), &direction, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PIN_DIRECTION, direction))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *ref, struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+ struct nlattr *nest;
+ int fs, ret;
+ u64 freq;
+
+ if (!ops->frequency_get)
+ return 0;
+ ret = ops->frequency_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
+ dpll_priv(dpll), &freq, extack);
+ if (ret)
+ return ret;
+ if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq,
+ DPLL_A_PIN_PAD))
+ return -EMSGSIZE;
+ for (fs = 0; fs < pin->prop->freq_supported_num; fs++) {
+ nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED);
+ if (!nest)
+ return -EMSGSIZE;
+ freq = pin->prop->freq_supported[fs].min;
+ if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq),
+ &freq, DPLL_A_PIN_PAD)) {
+ nla_nest_cancel(msg, nest);
+ return -EMSGSIZE;
+ }
+ freq = pin->prop->freq_supported[fs].max;
+ if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq),
+ &freq, DPLL_A_PIN_PAD)) {
+ nla_nest_cancel(msg, nest);
+ return -EMSGSIZE;
+ }
+ nla_nest_end(msg, nest);
+ }
+
+ return 0;
+}
+
+static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
+{
+ int fs;
+
+ for (fs = 0; fs < pin->prop->freq_supported_num; fs++)
+ if (freq >= pin->prop->freq_supported[fs].min &&
+ freq <= pin->prop->freq_supported[fs].max)
+ return true;
+ return false;
+}
+
+static int
+dpll_msg_add_pin_parents(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *dpll_ref,
+ struct netlink_ext_ack *extack)
+{
+ enum dpll_pin_state state;
+ struct dpll_pin_ref *ref;
+ struct dpll_pin *ppin;
+ struct nlattr *nest;
+ unsigned long index;
+ int ret;
+
+ xa_for_each(&pin->parent_refs, index, ref) {
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ void *parent_priv;
+
+ ppin = ref->pin;
+ parent_priv = dpll_pin_on_dpll_priv(dpll_ref->dpll, ppin);
+ ret = ops->state_on_pin_get(pin,
+ dpll_pin_on_pin_priv(ppin, pin),
+ ppin, parent_priv, &state, extack);
+ if (ret)
+ return ret;
+ nest = nla_nest_start(msg, DPLL_A_PIN_PARENT_PIN);
+ if (!nest)
+ return -EMSGSIZE;
+ ret = dpll_msg_add_dev_parent_handle(msg, ppin->id);
+ if (ret)
+ goto nest_cancel;
+ if (nla_put_u32(msg, DPLL_A_PIN_STATE, state)) {
+ ret = -EMSGSIZE;
+ goto nest_cancel;
+ }
+ nla_nest_end(msg, nest);
+ }
+
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(msg, nest);
+ return ret;
+}
+
+static int
+dpll_msg_add_pin_dplls(struct sk_buff *msg, struct dpll_pin *pin,
+ struct netlink_ext_ack *extack)
+{
+ struct dpll_pin_ref *ref;
+ struct nlattr *attr;
+ unsigned long index;
+ int ret;
+
+ xa_for_each(&pin->dpll_refs, index, ref) {
+ attr = nla_nest_start(msg, DPLL_A_PIN_PARENT_DEVICE);
+ if (!attr)
+ return -EMSGSIZE;
+ ret = dpll_msg_add_dev_parent_handle(msg, ref->dpll->id);
+ if (ret)
+ goto nest_cancel;
+ ret = dpll_msg_add_pin_on_dpll_state(msg, pin, ref, extack);
+ if (ret)
+ goto nest_cancel;
+ ret = dpll_msg_add_pin_prio(msg, pin, ref, extack);
+ if (ret)
+ goto nest_cancel;
+ ret = dpll_msg_add_pin_direction(msg, pin, ref, extack);
+ if (ret)
+ goto nest_cancel;
+ nla_nest_end(msg, attr);
+ }
+
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(msg, attr);
+ return ret;
+}
+
+static int
+dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_properties *prop = pin->prop;
+ struct dpll_pin_ref *ref;
+ int ret;
+
+ ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);
+ ASSERT_NOT_NULL(ref);
+
+ ret = dpll_msg_add_pin_handle(msg, pin);
+ if (ret)
+ return ret;
+ if (nla_put_string(msg, DPLL_A_PIN_MODULE_NAME,
+ module_name(pin->module)))
+ return -EMSGSIZE;
+ if (nla_put_64bit(msg, DPLL_A_PIN_CLOCK_ID, sizeof(pin->clock_id),
+ &pin->clock_id, DPLL_A_PIN_PAD))
+ return -EMSGSIZE;
+ if (prop->board_label &&
+ nla_put_string(msg, DPLL_A_PIN_BOARD_LABEL, prop->board_label))
+ return -EMSGSIZE;
+ if (prop->panel_label &&
+ nla_put_string(msg, DPLL_A_PIN_PANEL_LABEL, prop->panel_label))
+ return -EMSGSIZE;
+ if (prop->package_label &&
+ nla_put_string(msg, DPLL_A_PIN_PACKAGE_LABEL,
+ prop->package_label))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DPLL_A_PIN_TYPE, prop->type))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DPLL_A_PIN_CAPABILITIES, prop->capabilities))
+ return -EMSGSIZE;
+ ret = dpll_msg_add_pin_freq(msg, pin, ref, extack);
+ if (ret)
+ return ret;
+ if (xa_empty(&pin->parent_refs))
+ ret = dpll_msg_add_pin_dplls(msg, pin, extack);
+ else
+ ret = dpll_msg_add_pin_parents(msg, pin, ref, extack);
+
+ return ret;
+}
+
+static int
+dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
+ struct netlink_ext_ack *extack)
+{
+ int ret;
+
+ ret = dpll_msg_add_dev_handle(msg, dpll);
+ if (ret)
+ return ret;
+ if (nla_put_string(msg, DPLL_A_MODULE_NAME, module_name(dpll->module)))
+ return -EMSGSIZE;
+ if (nla_put_64bit(msg, DPLL_A_CLOCK_ID, sizeof(dpll->clock_id),
+ &dpll->clock_id, DPLL_A_PAD))
+ return -EMSGSIZE;
+ ret = dpll_msg_add_temp(msg, dpll, extack);
+ if (ret)
+ return ret;
+ ret = dpll_msg_add_lock_status(msg, dpll, extack);
+ if (ret)
+ return ret;
+ ret = dpll_msg_add_mode(msg, dpll, extack);
+ if (ret)
+ return ret;
+ ret = dpll_msg_add_mode_supported(msg, dpll, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_TYPE, dpll->type))
+ return -EMSGSIZE;
+
+ return ret;
+}
+
+static int
+dpll_device_event_send(enum dpll_cmd event, struct dpll_device *dpll)
+{
+ struct sk_buff *msg;
+ int ret = -ENOMEM;
+ void *hdr;
+
+ if (WARN_ON(!xa_get_mark(&dpll_device_xa, dpll->id, DPLL_REGISTERED)))
+ return -ENODEV;
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ hdr = genlmsg_put(msg, 0, 0, &dpll_nl_family, 0, event);
+ if (!hdr)
+ goto err_free_msg;
+ ret = dpll_device_get_one(dpll, msg, NULL);
+ if (ret)
+ goto err_cancel_msg;
+ genlmsg_end(msg, hdr);
+ genlmsg_multicast(&dpll_nl_family, msg, 0, 0, GFP_KERNEL);
+
+ return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+err_free_msg:
+ nlmsg_free(msg);
+
+ return ret;
+}
+
+int dpll_device_create_ntf(struct dpll_device *dpll)
+{
+ return dpll_device_event_send(DPLL_CMD_DEVICE_CREATE_NTF, dpll);
+}
+
+int dpll_device_delete_ntf(struct dpll_device *dpll)
+{
+ return dpll_device_event_send(DPLL_CMD_DEVICE_DELETE_NTF, dpll);
+}
+
+static int
+__dpll_device_change_ntf(struct dpll_device *dpll)
+{
+ return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
+}
+
+/**
+ * dpll_device_change_ntf - notify that the dpll device has been changed
+ * @dpll: registered dpll pointer
+ *
+ * Context: acquires and releases dpll_lock.
+ * Return: 0 on success, error code otherwise.
+ */
+int dpll_device_change_ntf(struct dpll_device *dpll)
+{
+ int ret;
+
+ mutex_lock(&dpll_lock);
+ ret = __dpll_device_change_ntf(dpll);
+ mutex_unlock(&dpll_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpll_device_change_ntf);
+
+static int
+dpll_pin_event_send(enum dpll_cmd event, struct dpll_pin *pin)
+{
+ struct sk_buff *msg;
+ int ret = -ENOMEM;
+ void *hdr;
+
+ if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)))
+ return -ENODEV;
+
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &dpll_nl_family, 0, event);
+ if (!hdr)
+ goto err_free_msg;
+ ret = dpll_cmd_pin_get_one(msg, pin, NULL);
+ if (ret)
+ goto err_cancel_msg;
+ genlmsg_end(msg, hdr);
+ genlmsg_multicast(&dpll_nl_family, msg, 0, 0, GFP_KERNEL);
+
+ return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+err_free_msg:
+ nlmsg_free(msg);
+
+ return ret;
+}
+
+int dpll_pin_create_ntf(struct dpll_pin *pin)
+{
+ return dpll_pin_event_send(DPLL_CMD_PIN_CREATE_NTF, pin);
+}
+
+int dpll_pin_delete_ntf(struct dpll_pin *pin)
+{
+ return dpll_pin_event_send(DPLL_CMD_PIN_DELETE_NTF, pin);
+}
+
+static int __dpll_pin_change_ntf(struct dpll_pin *pin)
+{
+ return dpll_pin_event_send(DPLL_CMD_PIN_CHANGE_NTF, pin);
+}
+
+/**
+ * dpll_pin_change_ntf - notify that the pin has been changed
+ * @pin: registered pin pointer
+ *
+ * Context: acquires and releases dpll_lock.
+ * Return: 0 on success, error code otherwise.
+ */
+int dpll_pin_change_ntf(struct dpll_pin *pin)
+{
+ int ret;
+
+ mutex_lock(&dpll_lock);
+ ret = __dpll_pin_change_ntf(pin);
+ mutex_unlock(&dpll_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpll_pin_change_ntf);
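/*
 * Hypothetical notification path (the my_* names are assumptions): after
 * the hardware reports a pin state change, the driver lets listeners know.
 * The helper takes dpll_lock itself, so it must not be called with that
 * lock already held.
 */
static void my_handle_pin_irq(struct my_priv *priv)
{
	dpll_pin_change_ntf(priv->pin);
}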
+
+static int
+dpll_pin_freq_set(struct dpll_pin *pin, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ u64 freq = nla_get_u64(a);
+ struct dpll_pin_ref *ref;
+ unsigned long i;
+ int ret;
+
+ if (!dpll_pin_is_freq_supported(pin, freq)) {
+ NL_SET_ERR_MSG_ATTR(extack, a, "frequency is not supported by the device");
+ return -EINVAL;
+ }
+
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+
+ if (!ops->frequency_set)
+ return -EOPNOTSUPP;
+ ret = ops->frequency_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ dpll, dpll_priv(dpll), freq, extack);
+ if (ret)
+ return ret;
+ }
+ __dpll_pin_change_ntf(pin);
+
+ return 0;
+}
+
+static int
+dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct dpll_pin_ref *parent_ref;
+ const struct dpll_pin_ops *ops;
+ struct dpll_pin_ref *dpll_ref;
+ void *pin_priv, *parent_priv;
+ struct dpll_pin *parent;
+ unsigned long i;
+ int ret;
+
+ if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
+ pin->prop->capabilities)) {
+ NL_SET_ERR_MSG(extack, "state changing is not allowed");
+ return -EOPNOTSUPP;
+ }
+ parent = xa_load(&dpll_pin_xa, parent_idx);
+ if (!parent)
+ return -EINVAL;
+ parent_ref = xa_load(&pin->parent_refs, parent->pin_idx);
+ if (!parent_ref)
+ return -EINVAL;
+ xa_for_each(&parent->dpll_refs, i, dpll_ref) {
+ ops = dpll_pin_ops(parent_ref);
+ if (!ops->state_on_pin_set)
+ return -EOPNOTSUPP;
+ pin_priv = dpll_pin_on_pin_priv(parent, pin);
+ parent_priv = dpll_pin_on_dpll_priv(dpll_ref->dpll, parent);
+ ret = ops->state_on_pin_set(pin, pin_priv, parent, parent_priv,
+ state, extack);
+ if (ret)
+ return ret;
+ }
+ __dpll_pin_change_ntf(pin);
+
+ return 0;
+}
+
+static int
+dpll_pin_state_set(struct dpll_device *dpll, struct dpll_pin *pin,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops;
+ struct dpll_pin_ref *ref;
+ int ret;
+
+ if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
+ pin->prop->capabilities)) {
+ NL_SET_ERR_MSG(extack, "state changing is not allowed");
+ return -EOPNOTSUPP;
+ }
+ ref = xa_load(&pin->dpll_refs, dpll->id);
+ ASSERT_NOT_NULL(ref);
+ ops = dpll_pin_ops(ref);
+ if (!ops->state_on_dpll_set)
+ return -EOPNOTSUPP;
+ ret = ops->state_on_dpll_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ dpll, dpll_priv(dpll), state, extack);
+ if (ret)
+ return ret;
+ __dpll_pin_change_ntf(pin);
+
+ return 0;
+}
+
+static int
+dpll_pin_prio_set(struct dpll_device *dpll, struct dpll_pin *pin,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops;
+ struct dpll_pin_ref *ref;
+ int ret;
+
+ if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE &
+ pin->prop->capabilities)) {
+ NL_SET_ERR_MSG(extack, "prio changing is not allowed");
+ return -EOPNOTSUPP;
+ }
+ ref = xa_load(&pin->dpll_refs, dpll->id);
+ ASSERT_NOT_NULL(ref);
+ ops = dpll_pin_ops(ref);
+ if (!ops->prio_set)
+ return -EOPNOTSUPP;
+ ret = ops->prio_set(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
+ dpll_priv(dpll), prio, extack);
+ if (ret)
+ return ret;
+ __dpll_pin_change_ntf(pin);
+
+ return 0;
+}
+
+static int
+dpll_pin_direction_set(struct dpll_pin *pin, struct dpll_device *dpll,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops;
+ struct dpll_pin_ref *ref;
+ int ret;
+
+ if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE &
+ pin->prop->capabilities)) {
+ NL_SET_ERR_MSG(extack, "direction changing is not allowed");
+ return -EOPNOTSUPP;
+ }
+ ref = xa_load(&pin->dpll_refs, dpll->id);
+ ASSERT_NOT_NULL(ref);
+ ops = dpll_pin_ops(ref);
+ if (!ops->direction_set)
+ return -EOPNOTSUPP;
+ ret = ops->direction_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ dpll, dpll_priv(dpll), direction, extack);
+ if (ret)
+ return ret;
+ __dpll_pin_change_ntf(pin);
+
+ return 0;
+}
+
+static int
+dpll_pin_parent_device_set(struct dpll_pin *pin, struct nlattr *parent_nest,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[DPLL_A_PIN_MAX + 1];
+ enum dpll_pin_direction direction;
+ enum dpll_pin_state state;
+ struct dpll_pin_ref *ref;
+ struct dpll_device *dpll;
+ u32 pdpll_idx, prio;
+ int ret;
+
+ nla_parse_nested(tb, DPLL_A_PIN_MAX, parent_nest,
+ dpll_pin_parent_device_nl_policy, extack);
+ if (!tb[DPLL_A_PIN_PARENT_ID]) {
+ NL_SET_ERR_MSG(extack, "device parent id expected");
+ return -EINVAL;
+ }
+ pdpll_idx = nla_get_u32(tb[DPLL_A_PIN_PARENT_ID]);
+ dpll = xa_load(&dpll_device_xa, pdpll_idx);
+ if (!dpll) {
+ NL_SET_ERR_MSG(extack, "parent device not found");
+ return -EINVAL;
+ }
+ ref = xa_load(&pin->dpll_refs, dpll->id);
+ if (!ref) {
+ NL_SET_ERR_MSG(extack, "pin not connected to given parent device");
+ return -EINVAL;
+ }
+ if (tb[DPLL_A_PIN_STATE]) {
+ state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
+ ret = dpll_pin_state_set(dpll, pin, state, extack);
+ if (ret)
+ return ret;
+ }
+ if (tb[DPLL_A_PIN_PRIO]) {
+ prio = nla_get_u32(tb[DPLL_A_PIN_PRIO]);
+ ret = dpll_pin_prio_set(dpll, pin, prio, extack);
+ if (ret)
+ return ret;
+ }
+ if (tb[DPLL_A_PIN_DIRECTION]) {
+ direction = nla_get_u32(tb[DPLL_A_PIN_DIRECTION]);
+ ret = dpll_pin_direction_set(pin, dpll, direction, extack);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int
+dpll_pin_parent_pin_set(struct dpll_pin *pin, struct nlattr *parent_nest,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[DPLL_A_PIN_MAX + 1];
+ enum dpll_pin_state state;
+ u32 ppin_idx;
+ int ret;
+
+ nla_parse_nested(tb, DPLL_A_PIN_MAX, parent_nest,
+ dpll_pin_parent_pin_nl_policy, extack);
+ if (!tb[DPLL_A_PIN_PARENT_ID]) {
+ NL_SET_ERR_MSG(extack, "device parent id expected");
+ return -EINVAL;
+ }
+ ppin_idx = nla_get_u32(tb[DPLL_A_PIN_PARENT_ID]);
+ if (!tb[DPLL_A_PIN_STATE]) {
+ NL_SET_ERR_MSG(extack, "pin state expected");
+ return -EINVAL;
+ }
+ state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
+ ret = dpll_pin_on_pin_state_set(pin, ppin_idx, state, extack);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+dpll_pin_set_from_nlattr(struct dpll_pin *pin, struct genl_info *info)
+{
+ struct nlattr *a;
+ int rem, ret;
+
+ nla_for_each_attr(a, genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ switch (nla_type(a)) {
+ case DPLL_A_PIN_FREQUENCY:
+ ret = dpll_pin_freq_set(pin, a, info->extack);
+ if (ret)
+ return ret;
+ break;
+ case DPLL_A_PIN_PARENT_DEVICE:
+ ret = dpll_pin_parent_device_set(pin, a, info->extack);
+ if (ret)
+ return ret;
+ break;
+ case DPLL_A_PIN_PARENT_PIN:
+ ret = dpll_pin_parent_pin_set(pin, a, info->extack);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static struct dpll_pin *
+dpll_pin_find(u64 clock_id, struct nlattr *mod_name_attr,
+ enum dpll_pin_type type, struct nlattr *board_label,
+ struct nlattr *panel_label, struct nlattr *package_label,
+ struct netlink_ext_ack *extack)
+{
+ bool board_match, panel_match, package_match;
+ struct dpll_pin *pin_match = NULL, *pin;
+ const struct dpll_pin_properties *prop;
+ bool cid_match, mod_match, type_match;
+ unsigned long i;
+
+ xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) {
+ prop = pin->prop;
+ cid_match = clock_id ? pin->clock_id == clock_id : true;
+ mod_match = mod_name_attr && module_name(pin->module) ?
+ !nla_strcmp(mod_name_attr,
+ module_name(pin->module)) : true;
+ type_match = type ? prop->type == type : true;
+ board_match = board_label ? (prop->board_label ?
+ !nla_strcmp(board_label, prop->board_label) : false) :
+ true;
+ panel_match = panel_label ? (prop->panel_label ?
+ !nla_strcmp(panel_label, prop->panel_label) : false) :
+ true;
+ package_match = package_label ? (prop->package_label ?
+ !nla_strcmp(package_label, prop->package_label) :
+ false) : true;
+ if (cid_match && mod_match && type_match && board_match &&
+ panel_match && package_match) {
+ if (pin_match) {
+ NL_SET_ERR_MSG(extack, "multiple matches");
+ return ERR_PTR(-EINVAL);
+ }
+ pin_match = pin;
+ }
+ }
+ if (!pin_match) {
+ NL_SET_ERR_MSG(extack, "not found");
+ return ERR_PTR(-ENODEV);
+ }
+ return pin_match;
+}
+
+static struct dpll_pin *dpll_pin_find_from_nlattr(struct genl_info *info)
+{
+ struct nlattr *attr, *mod_name_attr = NULL, *board_label_attr = NULL,
+ *panel_label_attr = NULL, *package_label_attr = NULL;
+ enum dpll_pin_type type = 0;
+ u64 clock_id = 0;
+ int rem = 0;
+
+ nla_for_each_attr(attr, genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ switch (nla_type(attr)) {
+ case DPLL_A_PIN_CLOCK_ID:
+ if (clock_id)
+ goto duplicated_attr;
+ clock_id = nla_get_u64(attr);
+ break;
+ case DPLL_A_PIN_MODULE_NAME:
+ if (mod_name_attr)
+ goto duplicated_attr;
+ mod_name_attr = attr;
+ break;
+ case DPLL_A_PIN_TYPE:
+ if (type)
+ goto duplicated_attr;
+ type = nla_get_u32(attr);
+ break;
+ case DPLL_A_PIN_BOARD_LABEL:
+ if (board_label_attr)
+ goto duplicated_attr;
+ board_label_attr = attr;
+ break;
+ case DPLL_A_PIN_PANEL_LABEL:
+ if (panel_label_attr)
+ goto duplicated_attr;
+ panel_label_attr = attr;
+ break;
+ case DPLL_A_PIN_PACKAGE_LABEL:
+ if (package_label_attr)
+ goto duplicated_attr;
+ package_label_attr = attr;
+ break;
+ default:
+ break;
+ }
+ }
+ if (!(clock_id || mod_name_attr || board_label_attr ||
+ panel_label_attr || package_label_attr)) {
+ NL_SET_ERR_MSG(info->extack, "missing attributes");
+ return ERR_PTR(-EINVAL);
+ }
+ return dpll_pin_find(clock_id, mod_name_attr, type, board_label_attr,
+ panel_label_attr, package_label_attr,
+ info->extack);
+duplicated_attr:
+ NL_SET_ERR_MSG(info->extack, "duplicated attribute");
+ return ERR_PTR(-EINVAL);
+}
+
+int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct dpll_pin *pin;
+ struct sk_buff *msg;
+ struct nlattr *hdr;
+ int ret;
+
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
+ DPLL_CMD_PIN_ID_GET);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+ }
+
+ pin = dpll_pin_find_from_nlattr(info);
+ if (!IS_ERR(pin)) {
+ ret = dpll_msg_add_pin_handle(msg, pin);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
+ }
+ }
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_reply(msg, info);
+}
+
+int dpll_nl_pin_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct dpll_pin *pin = info->user_ptr[0];
+ struct sk_buff *msg;
+ struct nlattr *hdr;
+ int ret;
+
+ if (!pin)
+ return -ENODEV;
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
+ DPLL_CMD_PIN_GET);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+ }
+ ret = dpll_cmd_pin_get_one(msg, pin, info->extack);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
+ }
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_reply(msg, info);
+}
+
+int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct dpll_dump_ctx *ctx = dpll_dump_context(cb);
+ struct dpll_pin *pin;
+ struct nlattr *hdr;
+ unsigned long i;
+ int ret = 0;
+
+ xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
+ ctx->idx) {
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ &dpll_nl_family, NLM_F_MULTI,
+ DPLL_CMD_PIN_GET);
+ if (!hdr) {
+ ret = -EMSGSIZE;
+ break;
+ }
+ ret = dpll_cmd_pin_get_one(skb, pin, cb->extack);
+ if (ret) {
+ genlmsg_cancel(skb, hdr);
+ break;
+ }
+ genlmsg_end(skb, hdr);
+ }
+ if (ret == -EMSGSIZE) {
+ ctx->idx = i;
+ return skb->len;
+ }
+ return ret;
+}
+
+int dpll_nl_pin_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct dpll_pin *pin = info->user_ptr[0];
+
+ return dpll_pin_set_from_nlattr(pin, info);
+}
+
+static struct dpll_device *
+dpll_device_find(u64 clock_id, struct nlattr *mod_name_attr,
+ enum dpll_type type, struct netlink_ext_ack *extack)
+{
+ struct dpll_device *dpll_match = NULL, *dpll;
+ bool cid_match, mod_match, type_match;
+ unsigned long i;
+
+ xa_for_each_marked(&dpll_device_xa, i, dpll, DPLL_REGISTERED) {
+ cid_match = clock_id ? dpll->clock_id == clock_id : true;
+ mod_match = mod_name_attr ? (module_name(dpll->module) ?
+ !nla_strcmp(mod_name_attr,
+ module_name(dpll->module)) : false) : true;
+ type_match = type ? dpll->type == type : true;
+ if (cid_match && mod_match && type_match) {
+ if (dpll_match) {
+ NL_SET_ERR_MSG(extack, "multiple matches");
+ return ERR_PTR(-EINVAL);
+ }
+ dpll_match = dpll;
+ }
+ }
+ if (!dpll_match) {
+ NL_SET_ERR_MSG(extack, "not found");
+ return ERR_PTR(-ENODEV);
+ }
+
+ return dpll_match;
+}
+
+static struct dpll_device *
+dpll_device_find_from_nlattr(struct genl_info *info)
+{
+ struct nlattr *attr, *mod_name_attr = NULL;
+ enum dpll_type type = 0;
+ u64 clock_id = 0;
+ int rem = 0;
+
+ nla_for_each_attr(attr, genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ switch (nla_type(attr)) {
+ case DPLL_A_CLOCK_ID:
+ if (clock_id)
+ goto duplicated_attr;
+ clock_id = nla_get_u64(attr);
+ break;
+ case DPLL_A_MODULE_NAME:
+ if (mod_name_attr)
+ goto duplicated_attr;
+ mod_name_attr = attr;
+ break;
+ case DPLL_A_TYPE:
+ if (type)
+ goto duplicated_attr;
+ type = nla_get_u32(attr);
+ break;
+ default:
+ break;
+ }
+ }
+ if (!clock_id && !mod_name_attr && !type) {
+ NL_SET_ERR_MSG(info->extack, "missing attributes");
+ return ERR_PTR(-EINVAL);
+ }
+ return dpll_device_find(clock_id, mod_name_attr, type, info->extack);
+duplicated_attr:
+ NL_SET_ERR_MSG(info->extack, "duplicated attribute");
+ return ERR_PTR(-EINVAL);
+}
+
+int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct dpll_device *dpll;
+ struct sk_buff *msg;
+ struct nlattr *hdr;
+ int ret;
+
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
+ DPLL_CMD_DEVICE_ID_GET);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+ }
+
+ dpll = dpll_device_find_from_nlattr(info);
+ if (!IS_ERR(dpll)) {
+ ret = dpll_msg_add_dev_handle(msg, dpll);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
+ }
+ }
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_reply(msg, info);
+}
+
+int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct dpll_device *dpll = info->user_ptr[0];
+ struct sk_buff *msg;
+ struct nlattr *hdr;
+ int ret;
+
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
+ DPLL_CMD_DEVICE_GET);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+ }
+
+ ret = dpll_device_get_one(dpll, msg, info->extack);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
+ }
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_reply(msg, info);
+}
+
+int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ /* placeholder for set command */
+ return 0;
+}
+
+int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct dpll_dump_ctx *ctx = dpll_dump_context(cb);
+ struct dpll_device *dpll;
+ struct nlattr *hdr;
+ unsigned long i;
+ int ret = 0;
+
+ xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED,
+ ctx->idx) {
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &dpll_nl_family,
+ NLM_F_MULTI, DPLL_CMD_DEVICE_GET);
+ if (!hdr) {
+ ret = -EMSGSIZE;
+ break;
+ }
+ ret = dpll_device_get_one(dpll, skb, cb->extack);
+ if (ret) {
+ genlmsg_cancel(skb, hdr);
+ break;
+ }
+ genlmsg_end(skb, hdr);
+ }
+ if (ret == -EMSGSIZE) {
+ ctx->idx = i;
+ return skb->len;
+ }
+ return ret;
+}
+
+int dpll_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ u32 id;
+
+ if (GENL_REQ_ATTR_CHECK(info, DPLL_A_ID))
+ return -EINVAL;
+
+ mutex_lock(&dpll_lock);
+ id = nla_get_u32(info->attrs[DPLL_A_ID]);
+ info->user_ptr[0] = dpll_device_get_by_id(id);
+ if (!info->user_ptr[0]) {
+ NL_SET_ERR_MSG(info->extack, "device not found");
+ goto unlock;
+ }
+ return 0;
+unlock:
+ mutex_unlock(&dpll_lock);
+ return -ENODEV;
+}
+
+void dpll_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ mutex_unlock(&dpll_lock);
+}
+
+int
+dpll_lock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ mutex_lock(&dpll_lock);
+
+ return 0;
+}
+
+void
+dpll_unlock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ mutex_unlock(&dpll_lock);
+}
+
+int dpll_lock_dumpit(struct netlink_callback *cb)
+{
+ mutex_lock(&dpll_lock);
+
+ return 0;
+}
+
+int dpll_unlock_dumpit(struct netlink_callback *cb)
+{
+ mutex_unlock(&dpll_lock);
+
+ return 0;
+}
+
+int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ int ret;
+
+ mutex_lock(&dpll_lock);
+ if (GENL_REQ_ATTR_CHECK(info, DPLL_A_PIN_ID)) {
+ ret = -EINVAL;
+ goto unlock_dev;
+ }
+ info->user_ptr[0] = xa_load(&dpll_pin_xa,
+ nla_get_u32(info->attrs[DPLL_A_PIN_ID]));
+ if (!info->user_ptr[0]) {
+ NL_SET_ERR_MSG(info->extack, "pin not found");
+ ret = -ENODEV;
+ goto unlock_dev;
+ }
+
+ return 0;
+
+unlock_dev:
+ mutex_unlock(&dpll_lock);
+ return ret;
+}
+
+void dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ mutex_unlock(&dpll_lock);
+}
diff --git a/drivers/dpll/dpll_netlink.h b/drivers/dpll/dpll_netlink.h
new file mode 100644
index 000000000000..a9cfd55f57fc
--- /dev/null
+++ b/drivers/dpll/dpll_netlink.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
+ * Copyright (c) 2023 Intel and affiliates
+ */
+
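+/* Lifetime notifications, multicast to listeners of the
+ * DPLL_NLGRP_MONITOR ("monitor") group as devices and pins get
+ * registered and unregistered.
+ */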
+int dpll_device_create_ntf(struct dpll_device *dpll);
+
+int dpll_device_delete_ntf(struct dpll_device *dpll);
+
+int dpll_pin_create_ntf(struct dpll_pin *pin);
+
+int dpll_pin_delete_ntf(struct dpll_pin *pin);
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
new file mode 100644
index 000000000000..14064c8c783b
--- /dev/null
+++ b/drivers/dpll/dpll_nl.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/dpll.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "dpll_nl.h"
+
+#include <uapi/linux/dpll.h>
+
+/* Common nested types */
+const struct nla_policy dpll_pin_parent_device_nl_policy[DPLL_A_PIN_STATE + 1] = {
+ [DPLL_A_PIN_PARENT_ID] = { .type = NLA_U32, },
+ [DPLL_A_PIN_DIRECTION] = NLA_POLICY_RANGE(NLA_U32, 1, 2),
+ [DPLL_A_PIN_PRIO] = { .type = NLA_U32, },
+ [DPLL_A_PIN_STATE] = NLA_POLICY_RANGE(NLA_U32, 1, 3),
+};
+
+const struct nla_policy dpll_pin_parent_pin_nl_policy[DPLL_A_PIN_STATE + 1] = {
+ [DPLL_A_PIN_PARENT_ID] = { .type = NLA_U32, },
+ [DPLL_A_PIN_STATE] = NLA_POLICY_RANGE(NLA_U32, 1, 3),
+};
+
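+/* Per-command policies. Enum-valued attributes are bounded with
+ * NLA_POLICY_RANGE() to the valid values of the corresponding uapi
+ * enum (e.g. 1..2 for dpll_type), so out-of-range requests are
+ * rejected before the handlers run.
+ */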
+/* DPLL_CMD_DEVICE_ID_GET - do */
+static const struct nla_policy dpll_device_id_get_nl_policy[DPLL_A_TYPE + 1] = {
+ [DPLL_A_MODULE_NAME] = { .type = NLA_NUL_STRING, },
+ [DPLL_A_CLOCK_ID] = { .type = NLA_U64, },
+ [DPLL_A_TYPE] = NLA_POLICY_RANGE(NLA_U32, 1, 2),
+};
+
+/* DPLL_CMD_DEVICE_GET - do */
+static const struct nla_policy dpll_device_get_nl_policy[DPLL_A_ID + 1] = {
+ [DPLL_A_ID] = { .type = NLA_U32, },
+};
+
+/* DPLL_CMD_DEVICE_SET - do */
+static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_ID + 1] = {
+ [DPLL_A_ID] = { .type = NLA_U32, },
+};
+
+/* DPLL_CMD_PIN_ID_GET - do */
+static const struct nla_policy dpll_pin_id_get_nl_policy[DPLL_A_PIN_TYPE + 1] = {
+ [DPLL_A_PIN_MODULE_NAME] = { .type = NLA_NUL_STRING, },
+ [DPLL_A_PIN_CLOCK_ID] = { .type = NLA_U64, },
+ [DPLL_A_PIN_BOARD_LABEL] = { .type = NLA_NUL_STRING, },
+ [DPLL_A_PIN_PANEL_LABEL] = { .type = NLA_NUL_STRING, },
+ [DPLL_A_PIN_PACKAGE_LABEL] = { .type = NLA_NUL_STRING, },
+ [DPLL_A_PIN_TYPE] = NLA_POLICY_RANGE(NLA_U32, 1, 5),
+};
+
+/* DPLL_CMD_PIN_GET - do */
+static const struct nla_policy dpll_pin_get_do_nl_policy[DPLL_A_PIN_ID + 1] = {
+ [DPLL_A_PIN_ID] = { .type = NLA_U32, },
+};
+
+/* DPLL_CMD_PIN_GET - dump */
+static const struct nla_policy dpll_pin_get_dump_nl_policy[DPLL_A_PIN_ID + 1] = {
+ [DPLL_A_PIN_ID] = { .type = NLA_U32, },
+};
+
+/* DPLL_CMD_PIN_SET - do */
+static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_PARENT_PIN + 1] = {
+ [DPLL_A_PIN_ID] = { .type = NLA_U32, },
+ [DPLL_A_PIN_FREQUENCY] = { .type = NLA_U64, },
+ [DPLL_A_PIN_DIRECTION] = NLA_POLICY_RANGE(NLA_U32, 1, 2),
+ [DPLL_A_PIN_PRIO] = { .type = NLA_U32, },
+ [DPLL_A_PIN_STATE] = NLA_POLICY_RANGE(NLA_U32, 1, 3),
+ [DPLL_A_PIN_PARENT_DEVICE] = NLA_POLICY_NESTED(dpll_pin_parent_device_nl_policy),
+ [DPLL_A_PIN_PARENT_PIN] = NLA_POLICY_NESTED(dpll_pin_parent_pin_nl_policy),
+};
+
+/* Ops table for dpll */
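+/* Do and dump flavors of a command are registered as separate split
+ * ops, each pairing its own policy with pre_doit/post_doit (do) or
+ * start/done (dump) hooks; every command requires GENL_ADMIN_PERM.
+ */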
+static const struct genl_split_ops dpll_nl_ops[] = {
+ {
+ .cmd = DPLL_CMD_DEVICE_ID_GET,
+ .pre_doit = dpll_lock_doit,
+ .doit = dpll_nl_device_id_get_doit,
+ .post_doit = dpll_unlock_doit,
+ .policy = dpll_device_id_get_nl_policy,
+ .maxattr = DPLL_A_TYPE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DPLL_CMD_DEVICE_GET,
+ .pre_doit = dpll_pre_doit,
+ .doit = dpll_nl_device_get_doit,
+ .post_doit = dpll_post_doit,
+ .policy = dpll_device_get_nl_policy,
+ .maxattr = DPLL_A_ID,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DPLL_CMD_DEVICE_GET,
+ .start = dpll_lock_dumpit,
+ .dumpit = dpll_nl_device_get_dumpit,
+ .done = dpll_unlock_dumpit,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DPLL_CMD_DEVICE_SET,
+ .pre_doit = dpll_pre_doit,
+ .doit = dpll_nl_device_set_doit,
+ .post_doit = dpll_post_doit,
+ .policy = dpll_device_set_nl_policy,
+ .maxattr = DPLL_A_ID,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DPLL_CMD_PIN_ID_GET,
+ .pre_doit = dpll_lock_doit,
+ .doit = dpll_nl_pin_id_get_doit,
+ .post_doit = dpll_unlock_doit,
+ .policy = dpll_pin_id_get_nl_policy,
+ .maxattr = DPLL_A_PIN_TYPE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DPLL_CMD_PIN_GET,
+ .pre_doit = dpll_pin_pre_doit,
+ .doit = dpll_nl_pin_get_doit,
+ .post_doit = dpll_pin_post_doit,
+ .policy = dpll_pin_get_do_nl_policy,
+ .maxattr = DPLL_A_PIN_ID,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DPLL_CMD_PIN_GET,
+ .start = dpll_lock_dumpit,
+ .dumpit = dpll_nl_pin_get_dumpit,
+ .done = dpll_unlock_dumpit,
+ .policy = dpll_pin_get_dump_nl_policy,
+ .maxattr = DPLL_A_PIN_ID,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DPLL_CMD_PIN_SET,
+ .pre_doit = dpll_pin_pre_doit,
+ .doit = dpll_nl_pin_set_doit,
+ .post_doit = dpll_pin_post_doit,
+ .policy = dpll_pin_set_nl_policy,
+ .maxattr = DPLL_A_PIN_PARENT_PIN,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+};
+
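+/* The family's single multicast group; notifications are delivered to
+ * subscribers of "monitor".
+ */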
+static const struct genl_multicast_group dpll_nl_mcgrps[] = {
+ [DPLL_NLGRP_MONITOR] = { "monitor", },
+};
+
+struct genl_family dpll_nl_family __ro_after_init = {
+ .name = DPLL_FAMILY_NAME,
+ .version = DPLL_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = dpll_nl_ops,
+ .n_split_ops = ARRAY_SIZE(dpll_nl_ops),
+ .mcgrps = dpll_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(dpll_nl_mcgrps),
+};
diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h
new file mode 100644
index 000000000000..1f67aaed4742
--- /dev/null
+++ b/drivers/dpll/dpll_nl.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/dpll.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_DPLL_GEN_H
+#define _LINUX_DPLL_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/dpll.h>
+
+/* Common nested types */
+extern const struct nla_policy dpll_pin_parent_device_nl_policy[DPLL_A_PIN_STATE + 1];
+extern const struct nla_policy dpll_pin_parent_pin_nl_policy[DPLL_A_PIN_STATE + 1];
+
+int dpll_lock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+int dpll_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+void
+dpll_unlock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+void
+dpll_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+void
+dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+int dpll_lock_dumpit(struct netlink_callback *cb);
+int dpll_unlock_dumpit(struct netlink_callback *cb);
+
+int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info);
+int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info);
+int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info);
+int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info);
+int dpll_nl_pin_get_doit(struct sk_buff *skb, struct genl_info *info);
+int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int dpll_nl_pin_set_doit(struct sk_buff *skb, struct genl_info *info);
+
+enum {
+ DPLL_NLGRP_MONITOR,
+};
+
+extern struct genl_family dpll_nl_family;
+
+#endif /* _LINUX_DPLL_GEN_H */
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 83214e2e70ab..dc50797a2ed0 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -247,12 +247,6 @@ static int __init net_olddevs_init(void)
for (num = 0; num < 8; ++num)
ethif_probe2(num);
-#ifdef CONFIG_COPS
- cops_probe(0);
- cops_probe(1);
- cops_probe(2);
-#endif
-
return 0;
}
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index b38ed52b82bc..b94f731e4576 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -37,36 +37,6 @@ config DEV_APPLETALK
on a network. If your Linux box is connected to such a network, and wish
to do IP over it, or you have a LocalTalk card and wish to use it to
connect to the AppleTalk network, say Y.
-
-
-config COPS
- tristate "COPS LocalTalk PC support"
- depends on DEV_APPLETALK && ISA
- depends on NETDEVICES
- select NETDEV_LEGACY_INIT
- help
- This allows you to use COPS AppleTalk cards to connect to LocalTalk
- networks. You also need version 1.3.3 or later of the netatalk
- package. This driver is experimental, which means that it may not
- work. This driver will only work if you choose "AppleTalk DDP"
- networking support, above.
- Please read the file
- <file:Documentation/networking/device_drivers/appletalk/cops.rst>.
-
-config COPS_DAYNA
- bool "Dayna firmware support"
- depends on COPS
- help
- Support COPS compatible cards with Dayna style firmware (Dayna
- DL2000/ Daynatalk/PC (half length), COPS LT-95, Farallon PhoneNET PC
- III, Farallon PhoneNET PC II).
-
-config COPS_TANGENT
- bool "Tangent firmware support"
- depends on COPS
- help
- Support COPS compatible cards with Tangent style firmware (Tangent
- ATB_II, Novell NL-1000, Daystar Digital LT-200.
config IPDDP
tristate "Appletalk-IP driver support"
diff --git a/drivers/net/appletalk/Makefile b/drivers/net/appletalk/Makefile
index 6db2943ce5d6..d8c7b23ec7ff 100644
--- a/drivers/net/appletalk/Makefile
+++ b/drivers/net/appletalk/Makefile
@@ -4,4 +4,3 @@
#
obj-$(CONFIG_IPDDP) += ipddp.o
-obj-$(CONFIG_COPS) += cops.o
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
deleted file mode 100644
index 97f254bdbb16..000000000000
--- a/drivers/net/appletalk/cops.c
+++ /dev/null
@@ -1,1005 +0,0 @@
-/* cops.c: LocalTalk driver for Linux.
- *
- * Authors:
- * - Jay Schulist <jschlst@samba.org>
- *
- * With more than a little help from;
- * - Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * Derived from:
- * - skeleton.c: A network driver outline for linux.
- * Written 1993-94 by Donald Becker.
- * - ltpc.c: A driver for the LocalTalk PC card.
- * Written by Bradford W. Johnson.
- *
- * Copyright 1993 United States Government as represented by the
- * Director, National Security Agency.
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Changes:
- * 19970608 Alan Cox Allowed dual card type support
- * Can set board type in insmod
- * Hooks for cops_setup routine
- * (not yet implemented).
- * 19971101 Jay Schulist Fixes for multiple lt* devices.
- * 19980607 Steven Hirsch Fixed the badly broken support
- * for Tangent type cards. Only
- * tested on Daystar LT200. Some
- * cleanup of formatting and program
- * logic. Added emacs 'local-vars'
- * setup for Jay's brace style.
- * 20000211 Alan Cox Cleaned up for softnet
- */
-
-static const char *version =
-"cops.c:v0.04 6/7/98 Jay Schulist <jschlst@samba.org>\n";
-/*
- * Sources:
- * COPS Localtalk SDK. This provides almost all of the information
- * needed.
- */
-
-/*
- * insmod/modprobe configurable stuff.
- * - IO Port, choose one your card supports or 0 if you dare.
- * - IRQ, also choose one your card supports or nothing and let
- * the driver figure it out.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/if_ltalk.h>
-#include <linux/delay.h> /* For udelay() */
-#include <linux/atalk.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <net/Space.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "cops.h" /* Our Stuff */
-#include "cops_ltdrv.h" /* Firmware code for Tangent type cards. */
-#include "cops_ffdrv.h" /* Firmware code for Dayna type cards. */
-
-/*
- * The name of the card. Is used for messages and in the requests for
- * io regions, irqs and dma channels
- */
-
-static const char *cardname = "cops";
-
-#ifdef CONFIG_COPS_DAYNA
-static int board_type = DAYNA; /* Module exported */
-#else
-static int board_type = TANGENT;
-#endif
-
-static int io = 0x240; /* Default IO for Dayna */
-static int irq = 5; /* Default IRQ */
-
-/*
- * COPS Autoprobe information.
- * Right now if port address is right but IRQ is not 5 this will
- * return a 5 no matter what since we will still get a status response.
- * Need one more additional check to narrow down after we have gotten
- * the ioaddr. But since only other possible IRQs is 3 and 4 so no real
- * hurry on this. I *STRONGLY* recommend using IRQ 5 for your card with
- * this driver.
- *
- * This driver has 2 modes and they are: Dayna mode and Tangent mode.
- * Each mode corresponds with the type of card. It has been found
- * that there are 2 main types of cards and all other cards are
- * the same and just have different names or only have minor differences
- * such as more IO ports. As this driver is tested it will
- * become more clear on exactly what cards are supported. The driver
- * defaults to using Dayna mode. To change the drivers mode, simply
- * select Dayna or Tangent mode when configuring the kernel.
- *
- * This driver should support:
- * TANGENT driver mode:
- * Tangent ATB-II, Novell NL-1000, Daystar Digital LT-200,
- * COPS LT-1
- * DAYNA driver mode:
- * Dayna DL2000/DaynaTalk PC (Half Length), COPS LT-95,
- * Farallon PhoneNET PC III, Farallon PhoneNET PC II
- * Other cards possibly supported mode unknown though:
- * Dayna DL2000 (Full length), COPS LT/M (Micro-Channel)
- *
- * Cards NOT supported by this driver but supported by the ltpc.c
- * driver written by Bradford W. Johnson <johns393@maroon.tc.umn.edu>
- * Farallon PhoneNET PC
- * Original Apple LocalTalk PC card
- *
- * N.B.
- *
- * The Daystar Digital LT200 boards do not support interrupt-driven
- * IO. You must specify 'irq=0xff' as a module parameter to invoke
- * polled mode. I also believe that the port probing logic is quite
- * dangerous at best and certainly hopeless for a polled card. Best to
- * specify both. - Steve H.
- *
- */
-
-/*
- * Zero terminated list of IO ports to probe.
- */
-
-static unsigned int ports[] = {
- 0x240, 0x340, 0x200, 0x210, 0x220, 0x230, 0x260,
- 0x2A0, 0x300, 0x310, 0x320, 0x330, 0x350, 0x360,
- 0
-};
-
-/*
- * Zero terminated list of IRQ ports to probe.
- */
-
-static int cops_irqlist[] = {
- 5, 4, 3, 0
-};
-
-static struct timer_list cops_timer;
-static struct net_device *cops_timer_dev;
-
-/* use 0 for production, 1 for verification, 2 for debug, 3 for verbose debug */
-#ifndef COPS_DEBUG
-#define COPS_DEBUG 1
-#endif
-static unsigned int cops_debug = COPS_DEBUG;
-
-/* The number of low I/O ports used by the card. */
-#define COPS_IO_EXTENT 8
-
-/* Information that needs to be kept for each board. */
-
-struct cops_local
-{
- int board; /* Holds what board type is. */
- int nodeid; /* Set to 1 once have nodeid. */
- unsigned char node_acquire; /* Node ID when acquired. */
- struct atalk_addr node_addr; /* Full node address */
- spinlock_t lock; /* RX/TX lock */
-};
-
-/* Index to functions, as function prototypes. */
-static int cops_probe1 (struct net_device *dev, int ioaddr);
-static int cops_irq (int ioaddr, int board);
-
-static int cops_open (struct net_device *dev);
-static int cops_jumpstart (struct net_device *dev);
-static void cops_reset (struct net_device *dev, int sleep);
-static void cops_load (struct net_device *dev);
-static int cops_nodeid (struct net_device *dev, int nodeid);
-
-static irqreturn_t cops_interrupt (int irq, void *dev_id);
-static void cops_poll(struct timer_list *t);
-static void cops_timeout(struct net_device *dev, unsigned int txqueue);
-static void cops_rx (struct net_device *dev);
-static netdev_tx_t cops_send_packet (struct sk_buff *skb,
- struct net_device *dev);
-static void set_multicast_list (struct net_device *dev);
-static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
-static int cops_close (struct net_device *dev);
-
-static void cleanup_card(struct net_device *dev)
-{
- if (dev->irq)
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, COPS_IO_EXTENT);
-}
-
-/*
- * Check for a network adaptor of this type, and return '0' iff one exists.
- * If dev->base_addr == 0, probe all likely locations.
- * If dev->base_addr in [1..0x1ff], always return failure.
- * otherwise go with what we pass in.
- */
-struct net_device * __init cops_probe(int unit)
-{
- struct net_device *dev;
- unsigned *port;
- int base_addr;
- int err = 0;
-
- dev = alloc_ltalkdev(sizeof(struct cops_local));
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "lt%d", unit);
- netdev_boot_setup_check(dev);
- irq = dev->irq;
- base_addr = dev->base_addr;
- } else {
- base_addr = dev->base_addr = io;
- }
-
- if (base_addr > 0x1ff) { /* Check a single specified location. */
- err = cops_probe1(dev, base_addr);
- } else if (base_addr != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- /* FIXME Does this really work for cards which generate irq?
- * It's definitely N.G. for polled Tangent. sh
- * Dayna cards don't autoprobe well at all, but if your card is
- * at IRQ 5 & IO 0x240 we find it every time. ;) JS
- */
- for (port = ports; *port && cops_probe1(dev, *port) < 0; port++)
- ;
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops cops_netdev_ops = {
- .ndo_open = cops_open,
- .ndo_stop = cops_close,
- .ndo_start_xmit = cops_send_packet,
- .ndo_tx_timeout = cops_timeout,
- .ndo_do_ioctl = cops_ioctl,
- .ndo_set_rx_mode = set_multicast_list,
-};
-
-/*
- * This is the real probe routine. Linux has a history of friendly device
- * probes on the ISA bus. A good device probes avoids doing writes, and
- * verifies that the correct device exists and functions.
- */
-static int __init cops_probe1(struct net_device *dev, int ioaddr)
-{
- struct cops_local *lp;
- static unsigned version_printed;
- int board = board_type;
- int retval;
-
- if(cops_debug && version_printed++ == 0)
- printk("%s", version);
-
- /* Grab the region so no one else tries to probe our ioports. */
- if (!request_region(ioaddr, COPS_IO_EXTENT, dev->name))
- return -EBUSY;
-
- /*
- * Since this board has jumpered interrupts, allocate the interrupt
- * vector now. There is no point in waiting since no other device
- * can use the interrupt, and this marks the irq as busy. Jumpered
- * interrupts are typically not reported by the boards, and we must
- * used AutoIRQ to find them.
- */
- dev->irq = irq;
- switch (dev->irq)
- {
- case 0:
- /* COPS AutoIRQ routine */
- dev->irq = cops_irq(ioaddr, board);
- if (dev->irq)
- break;
- fallthrough; /* Once no IRQ found on this port */
- case 1:
- retval = -EINVAL;
- goto err_out;
-
- /* Fixup for users that don't know that IRQ 2 is really
- * IRQ 9, or don't know which one to set.
- */
- case 2:
- dev->irq = 9;
- break;
-
- /* Polled operation requested. Although irq of zero passed as
- * a parameter tells the init routines to probe, we'll
- * overload it to denote polled operation at runtime.
- */
- case 0xff:
- dev->irq = 0;
- break;
-
- default:
- break;
- }
-
- dev->base_addr = ioaddr;
-
- /* Reserve any actual interrupt. */
- if (dev->irq) {
- retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
- if (retval)
- goto err_out;
- }
-
- lp = netdev_priv(dev);
- spin_lock_init(&lp->lock);
-
- /* Copy local board variable to lp struct. */
- lp->board = board;
-
- dev->netdev_ops = &cops_netdev_ops;
- dev->watchdog_timeo = HZ * 2;
-
-
- /* Tell the user where the card is and what mode we're in. */
- if(board==DAYNA)
- printk("%s: %s at %#3x, using IRQ %d, in Dayna mode.\n",
- dev->name, cardname, ioaddr, dev->irq);
- if(board==TANGENT) {
- if(dev->irq)
- printk("%s: %s at %#3x, IRQ %d, in Tangent mode\n",
- dev->name, cardname, ioaddr, dev->irq);
- else
- printk("%s: %s at %#3x, using polled IO, in Tangent mode.\n",
- dev->name, cardname, ioaddr);
-
- }
- return 0;
-
-err_out:
- release_region(ioaddr, COPS_IO_EXTENT);
- return retval;
-}
-
-static int __init cops_irq (int ioaddr, int board)
-{ /*
- * This does not use the IRQ to determine where the IRQ is. We just
- * assume that when we get a correct status response that it's the IRQ.
- * This really just verifies the IO port but since we only have access
- * to such a small number of IRQs (5, 4, 3) this is not bad.
- * This will probably not work for more than one card.
- */
- int irqaddr=0;
- int i, x, status;
-
- if(board==DAYNA)
- {
- outb(0, ioaddr+DAYNA_RESET);
- inb(ioaddr+DAYNA_RESET);
- mdelay(333);
- }
- if(board==TANGENT)
- {
- inb(ioaddr);
- outb(0, ioaddr);
- outb(0, ioaddr+TANG_RESET);
- }
-
- for(i=0; cops_irqlist[i] !=0; i++)
- {
- irqaddr = cops_irqlist[i];
- for(x = 0xFFFF; x>0; x --) /* wait for response */
- {
- if(board==DAYNA)
- {
- status = (inb(ioaddr+DAYNA_CARD_STATUS)&3);
- if(status == 1)
- return irqaddr;
- }
- if(board==TANGENT)
- {
- if((inb(ioaddr+TANG_CARD_STATUS)& TANG_TX_READY) !=0)
- return irqaddr;
- }
- }
- }
- return 0; /* no IRQ found */
-}
-
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * sometime after booting when the 'ifconfig' program is run.
- */
-static int cops_open(struct net_device *dev)
-{
- struct cops_local *lp = netdev_priv(dev);
-
- if(dev->irq==0)
- {
- /*
- * I don't know if the Dayna-style boards support polled
- * operation. For now, only allow it for Tangent.
- */
- if(lp->board==TANGENT) /* Poll 20 times per second */
- {
- cops_timer_dev = dev;
- timer_setup(&cops_timer, cops_poll, 0);
- cops_timer.expires = jiffies + HZ/20;
- add_timer(&cops_timer);
- }
- else
- {
- printk(KERN_WARNING "%s: No irq line set\n", dev->name);
- return -EAGAIN;
- }
- }
-
- cops_jumpstart(dev); /* Start the card up. */
-
- netif_start_queue(dev);
- return 0;
-}
-
-/*
- * This allows for a dynamic start/restart of the entire card.
- */
-static int cops_jumpstart(struct net_device *dev)
-{
- struct cops_local *lp = netdev_priv(dev);
-
- /*
- * Once the card has the firmware loaded and has acquired
- * the nodeid, if it is reset it will lose it all.
- */
- cops_reset(dev,1); /* Need to reset card before load firmware. */
- cops_load(dev); /* Load the firmware. */
-
- /*
- * If atalkd already gave us a nodeid we will use that
- * one again, else we wait for atalkd to give us a nodeid
- * in cops_ioctl. This may cause a problem if someone steals
- * our nodeid while we are resetting.
- */
- if(lp->nodeid == 1)
- cops_nodeid(dev,lp->node_acquire);
-
- return 0;
-}
-
-static void tangent_wait_reset(int ioaddr)
-{
- int timeout=0;
-
- while(timeout++ < 5 && (inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
- mdelay(1); /* Wait 1 second */
-}
-
-/*
- * Reset the LocalTalk board.
- */
-static void cops_reset(struct net_device *dev, int sleep)
-{
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr=dev->base_addr;
-
- if(lp->board==TANGENT)
- {
- inb(ioaddr); /* Clear request latch. */
- outb(0,ioaddr); /* Clear the TANG_TX_READY flop. */
- outb(0, ioaddr+TANG_RESET); /* Reset the adapter. */
-
- tangent_wait_reset(ioaddr);
- outb(0, ioaddr+TANG_CLEAR_INT);
- }
- if(lp->board==DAYNA)
- {
- outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */
- inb(ioaddr+DAYNA_RESET); /* Clear the reset */
- if (sleep)
- msleep(333);
- else
- mdelay(333);
- }
-
- netif_wake_queue(dev);
-}
-
-static void cops_load (struct net_device *dev)
-{
- struct ifreq ifr;
- struct ltfirmware *ltf= (struct ltfirmware *)&ifr.ifr_ifru;
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr=dev->base_addr;
- int length, i = 0;
-
- strcpy(ifr.ifr_name,"lt0");
-
- /* Get card's firmware code and do some checks on it. */
-#ifdef CONFIG_COPS_DAYNA
- if(lp->board==DAYNA)
- {
- ltf->length=sizeof(ffdrv_code);
- ltf->data=ffdrv_code;
- }
- else
-#endif
-#ifdef CONFIG_COPS_TANGENT
- if(lp->board==TANGENT)
- {
- ltf->length=sizeof(ltdrv_code);
- ltf->data=ltdrv_code;
- }
- else
-#endif
- {
- printk(KERN_INFO "%s; unsupported board type.\n", dev->name);
- return;
- }
-
- /* Check to make sure firmware is correct length. */
- if(lp->board==DAYNA && ltf->length!=5983)
- {
- printk(KERN_WARNING "%s: Firmware is not length of FFDRV.BIN.\n", dev->name);
- return;
- }
- if(lp->board==TANGENT && ltf->length!=2501)
- {
- printk(KERN_WARNING "%s: Firmware is not length of DRVCODE.BIN.\n", dev->name);
- return;
- }
-
- if(lp->board==DAYNA)
- {
- /*
- * We must wait for a status response
- * with the DAYNA board.
- */
- while(++i<65536)
- {
- if((inb(ioaddr+DAYNA_CARD_STATUS)&3)==1)
- break;
- }
-
- if(i==65536)
- return;
- }
-
- /*
- * Upload the firmware and kick. Byte-by-byte works nicely here.
- */
- i=0;
- length = ltf->length;
- while(length--)
- {
- outb(ltf->data[i], ioaddr);
- i++;
- }
-
- if(cops_debug > 1)
- printk("%s: Uploaded firmware - %d bytes of %d bytes.\n",
- dev->name, i, ltf->length);
-
- if(lp->board==DAYNA) /* Tell Dayna to run the firmware code. */
- outb(1, ioaddr+DAYNA_INT_CARD);
- else /* Tell Tang to run the firmware code. */
- inb(ioaddr);
-
- if(lp->board==TANGENT)
- {
- tangent_wait_reset(ioaddr);
- inb(ioaddr); /* Clear initial ready signal. */
- }
-}
-
-/*
- * Get the LocalTalk Nodeid from the card. We can suggest
- * any nodeid 1-254. The card will try and get that exact
- * address else we can specify 0 as the nodeid and the card
- * will autoprobe for a nodeid.
- */
-static int cops_nodeid (struct net_device *dev, int nodeid)
-{
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if(lp->board == DAYNA)
- {
- /* Empty any pending adapter responses. */
- while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0)
- {
- outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupts. */
- if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
- cops_rx(dev); /* Kick any packets waiting. */
- schedule();
- }
-
- outb(2, ioaddr); /* Output command packet length as 2. */
- outb(0, ioaddr);
- outb(LAP_INIT, ioaddr); /* Send LAP_INIT command byte. */
- outb(nodeid, ioaddr); /* Suggest node address. */
- }
-
- if(lp->board == TANGENT)
- {
- /* Empty any pending adapter responses. */
- while(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY)
- {
- outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupt. */
- cops_rx(dev); /* Kick out packets waiting. */
- schedule();
- }
-
- /* Not sure what Tangent does if nodeid picked is used. */
- if(nodeid == 0) /* Seed. */
- nodeid = jiffies&0xFF; /* Get a random try */
- outb(2, ioaddr); /* Command length LSB */
- outb(0, ioaddr); /* Command length MSB */
- outb(LAP_INIT, ioaddr); /* Send LAP_INIT byte */
- outb(nodeid, ioaddr); /* LAP address hint. */
- outb(0xFF, ioaddr); /* Int. level to use */
- }
-
- lp->node_acquire=0; /* Set nodeid holder to 0. */
- while(lp->node_acquire==0) /* Get *True* nodeid finally. */
- {
- outb(0, ioaddr+COPS_CLEAR_INT); /* Clear any interrupt. */
-
- if(lp->board == DAYNA)
- {
- if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
- cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
- }
- if(lp->board == TANGENT)
- {
- if(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY)
- cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
- }
- schedule();
- }
-
- if(cops_debug > 1)
- printk(KERN_DEBUG "%s: Node ID %d has been acquired.\n",
- dev->name, lp->node_acquire);
-
- lp->nodeid=1; /* Set got nodeid to 1. */
-
- return 0;
-}
-
-/*
- * Poll the Tangent type cards to see if we have work.
- */
-
-static void cops_poll(struct timer_list *unused)
-{
- int ioaddr, status;
- int boguscount = 0;
- struct net_device *dev = cops_timer_dev;
-
- del_timer(&cops_timer);
-
- if(dev == NULL)
- return; /* We've been downed */
-
- ioaddr = dev->base_addr;
- do {
- status=inb(ioaddr+TANG_CARD_STATUS);
- if(status & TANG_RX_READY)
- cops_rx(dev);
- if(status & TANG_TX_READY)
- netif_wake_queue(dev);
- status = inb(ioaddr+TANG_CARD_STATUS);
- } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY)));
-
- /* poll 20 times per second */
- cops_timer.expires = jiffies + HZ/20;
- add_timer(&cops_timer);
-}
-
-/*
- * The typical workload of the driver:
- * Handle the network interface interrupts.
- */
-static irqreturn_t cops_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct cops_local *lp;
- int ioaddr, status;
- int boguscount = 0;
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- if(lp->board==DAYNA)
- {
- do {
- outb(0, ioaddr + COPS_CLEAR_INT);
- status=inb(ioaddr+DAYNA_CARD_STATUS);
- if((status&0x03)==DAYNA_RX_REQUEST)
- cops_rx(dev);
- netif_wake_queue(dev);
- } while(++boguscount < 20);
- }
- else
- {
- do {
- status=inb(ioaddr+TANG_CARD_STATUS);
- if(status & TANG_RX_READY)
- cops_rx(dev);
- if(status & TANG_TX_READY)
- netif_wake_queue(dev);
- status=inb(ioaddr+TANG_CARD_STATUS);
- } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY)));
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * We have a good packet(s), get it/them out of the buffers.
- */
-static void cops_rx(struct net_device *dev)
-{
- int pkt_len = 0;
- int rsp_type = 0;
- struct sk_buff *skb = NULL;
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int boguscount = 0;
- unsigned long flags;
-
-
- spin_lock_irqsave(&lp->lock, flags);
-
- if(lp->board==DAYNA)
- {
- outb(0, ioaddr); /* Send out Zero length. */
- outb(0, ioaddr);
- outb(DATA_READ, ioaddr); /* Send read command out. */
-
- /* Wait for DMA to turn around. */
- while(++boguscount<1000000)
- {
- barrier();
- if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_READY)
- break;
- }
-
- if(boguscount==1000000)
- {
- printk(KERN_WARNING "%s: DMA timed out.\n",dev->name);
- spin_unlock_irqrestore(&lp->lock, flags);
- return;
- }
- }
-
- /* Get response length. */
- pkt_len = inb(ioaddr);
- pkt_len |= (inb(ioaddr) << 8);
- /* Input IO code. */
- rsp_type=inb(ioaddr);
-
- /* Malloc up new buffer. */
- skb = dev_alloc_skb(pkt_len);
- if(skb == NULL)
- {
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
- while(pkt_len--) /* Discard packet */
- inb(ioaddr);
- spin_unlock_irqrestore(&lp->lock, flags);
- return;
- }
- skb->dev = dev;
- skb_put(skb, pkt_len);
- skb->protocol = htons(ETH_P_LOCALTALK);
-
- insb(ioaddr, skb->data, pkt_len); /* Eat the Data */
-
- if(lp->board==DAYNA)
- outb(1, ioaddr+DAYNA_INT_CARD); /* Interrupt the card */
-
- spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
-
- /* Check for bad response length */
- if(pkt_len < 0 || pkt_len > MAX_LLAP_SIZE)
- {
- printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n",
- dev->name, pkt_len);
- dev->stats.tx_errors++;
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* Set nodeid and then get out. */
- if(rsp_type == LAP_INIT_RSP)
- { /* Nodeid taken from received packet. */
- lp->node_acquire = skb->data[0];
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* One last check to make sure we have a good packet. */
- if(rsp_type != LAP_RESPONSE)
- {
- printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type);
- dev->stats.tx_errors++;
- dev_kfree_skb_any(skb);
- return;
- }
-
- skb_reset_mac_header(skb); /* Point to entire packet. */
- skb_pull(skb,3);
- skb_reset_transport_header(skb); /* Point to data (Skip header). */
-
- /* Update the counters. */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
-
- /* Send packet to a higher place. */
- netif_rx(skb);
-}
-
-static void cops_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- dev->stats.tx_errors++;
- if(lp->board==TANGENT)
- {
- if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
- printk(KERN_WARNING "%s: No TX complete interrupt.\n", dev->name);
- }
- printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
- cops_jumpstart(dev); /* Restart the card. */
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-
-/*
- * Make the card transmit a LocalTalk packet.
- */
-
-static netdev_tx_t cops_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
-
- /*
- * Block a timer-based transmit from overlapping.
- */
-
- netif_stop_queue(dev);
-
- spin_lock_irqsave(&lp->lock, flags);
- if(lp->board == DAYNA) /* Wait for adapter transmit buffer. */
- while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0)
- cpu_relax();
- if(lp->board == TANGENT) /* Wait for adapter transmit buffer. */
- while((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
- cpu_relax();
-
- /* Output IO length. */
- outb(skb->len, ioaddr);
- outb(skb->len >> 8, ioaddr);
-
- /* Output IO code. */
- outb(LAP_WRITE, ioaddr);
-
- if(lp->board == DAYNA) /* Check the transmit buffer again. */
- while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0);
-
- outsb(ioaddr, skb->data, skb->len); /* Send out the data. */
-
- if(lp->board==DAYNA) /* Dayna requires you kick the card */
- outb(1, ioaddr+DAYNA_INT_CARD);
-
- spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
-
- /* Done sending packet, update counters and cleanup. */
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- dev_kfree_skb (skb);
- return NETDEV_TX_OK;
-}
-
-/*
- * Dummy function to keep the Appletalk layer happy.
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
- if(cops_debug >= 3)
- printk("%s: set_multicast_list executed\n", dev->name);
-}
-
-/*
- * System ioctls for the COPS LocalTalk card.
- */
-
-static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct cops_local *lp = netdev_priv(dev);
- struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr;
- struct atalk_addr *aa = &lp->node_addr;
-
- switch(cmd)
- {
- case SIOCSIFADDR:
- /* Get and set the nodeid and network # atalkd wants. */
- cops_nodeid(dev, sa->sat_addr.s_node);
- aa->s_net = sa->sat_addr.s_net;
- aa->s_node = lp->node_acquire;
-
- /* Set broardcast address. */
- dev->broadcast[0] = 0xFF;
-
- /* Set hardware address. */
- dev->addr_len = 1;
- dev_addr_set(dev, &aa->s_node);
- return 0;
-
- case SIOCGIFADDR:
- sa->sat_addr.s_net = aa->s_net;
- sa->sat_addr.s_node = aa->s_node;
- return 0;
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/*
- * The inverse routine to cops_open().
- */
-
-static int cops_close(struct net_device *dev)
-{
- struct cops_local *lp = netdev_priv(dev);
-
- /* If we were running polled, yank the timer.
- */
- if(lp->board==TANGENT && dev->irq==0)
- del_timer(&cops_timer);
-
- netif_stop_queue(dev);
- return 0;
-}
-
-
-#ifdef MODULE
-static struct net_device *cops_dev;
-
-MODULE_LICENSE("GPL");
-module_param_hw(io, int, ioport, 0);
-module_param_hw(irq, int, irq, 0);
-module_param_hw(board_type, int, other, 0);
-
-static int __init cops_module_init(void)
-{
- if (io == 0)
- printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
- cardname);
- cops_dev = cops_probe(-1);
- return PTR_ERR_OR_ZERO(cops_dev);
-}
-
-static void __exit cops_module_exit(void)
-{
- unregister_netdev(cops_dev);
- cleanup_card(cops_dev);
- free_netdev(cops_dev);
-}
-module_init(cops_module_init);
-module_exit(cops_module_exit);
-#endif /* MODULE */
diff --git a/drivers/net/appletalk/cops.h b/drivers/net/appletalk/cops.h
deleted file mode 100644
index 7a0bfb351929..000000000000
--- a/drivers/net/appletalk/cops.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* cops.h: LocalTalk driver for Linux.
- *
- * Authors:
- * - Jay Schulist <jschlst@samba.org>
- */
-
-#ifndef __LINUX_COPSLTALK_H
-#define __LINUX_COPSLTALK_H
-
-#ifdef __KERNEL__
-
-/* Max LLAP size we will accept. */
-#define MAX_LLAP_SIZE 603
-
-/* Tangent */
-#define TANG_CARD_STATUS 1
-#define TANG_CLEAR_INT 1
-#define TANG_RESET 3
-
-#define TANG_TX_READY 1
-#define TANG_RX_READY 2
-
-/* Dayna */
-#define DAYNA_CMD_DATA 0
-#define DAYNA_CLEAR_INT 1
-#define DAYNA_CARD_STATUS 2
-#define DAYNA_INT_CARD 3
-#define DAYNA_RESET 4
-
-#define DAYNA_RX_READY 0
-#define DAYNA_TX_READY 1
-#define DAYNA_RX_REQUEST 3
-
-/* Same on both card types */
-#define COPS_CLEAR_INT 1
-
-/* LAP response codes received from the cards. */
-#define LAP_INIT 1 /* Init cmd */
-#define LAP_INIT_RSP 2 /* Init response */
-#define LAP_WRITE 3 /* Write cmd */
-#define DATA_READ 4 /* Data read */
-#define LAP_RESPONSE 4 /* Received ALAP frame response */
-#define LAP_GETSTAT 5 /* Get LAP and HW status */
-#define LAP_RSPSTAT 6 /* Status response */
-
-#endif
-
-/*
- * Structure to hold the firmware information.
- */
-struct ltfirmware
-{
- unsigned int length;
- const unsigned char *data;
-};
-
-#define DAYNA 1
-#define TANGENT 2
-
-#endif
diff --git a/drivers/net/appletalk/cops_ffdrv.h b/drivers/net/appletalk/cops_ffdrv.h
deleted file mode 100644
index b02005087c1b..000000000000
--- a/drivers/net/appletalk/cops_ffdrv.h
+++ /dev/null
@@ -1,532 +0,0 @@
-
-/*
- * The firmware this driver downloads into the Localtalk card is a
- * separate program and is not GPL'd source code, even though the Linux
- * side driver and the routine that loads this data into the card are.
- *
- * It is taken from the COPS SDK and is under the following license
- *
- * This material is licensed to you strictly for use in conjunction with
- * the use of COPS LocalTalk adapters.
- * There is no charge for this SDK. And no waranty express or implied
- * about its fitness for any purpose. However, we will cheerefully
- * refund every penny you paid for this SDK...
- * Regards,
- *
- * Thomas F. Divine
- * Chief Scientist
- */
-
-
-/* cops_ffdrv.h: LocalTalk driver firmware dump for Linux.
- *
- * Authors:
- * - Jay Schulist <jschlst@samba.org>
- */
-
-
-#ifdef CONFIG_COPS_DAYNA
-
-static const unsigned char ffdrv_code[] = {
- 58,3,0,50,228,149,33,255,255,34,226,149,
- 249,17,40,152,33,202,154,183,237,82,77,68,
- 11,107,98,19,54,0,237,176,175,50,80,0,
- 62,128,237,71,62,32,237,57,51,62,12,237,
- 57,50,237,57,54,62,6,237,57,52,62,12,
- 237,57,49,33,107,137,34,32,128,33,83,130,
- 34,40,128,33,86,130,34,42,128,33,112,130,
- 34,36,128,33,211,130,34,38,128,62,0,237,
- 57,16,33,63,148,34,34,128,237,94,205,15,
- 130,251,205,168,145,24,141,67,111,112,121,114,
- 105,103,104,116,32,40,67,41,32,49,57,56,
- 56,32,45,32,68,97,121,110,97,32,67,111,
- 109,109,117,110,105,99,97,116,105,111,110,115,
- 32,32,32,65,108,108,32,114,105,103,104,116,
- 115,32,114,101,115,101,114,118,101,100,46,32,
- 32,40,68,40,68,7,16,8,34,7,22,6,
- 16,5,12,4,8,3,6,140,0,16,39,128,
- 0,4,96,10,224,6,0,7,126,2,64,11,
- 118,12,6,13,0,14,193,15,0,5,96,3,
- 192,1,64,9,8,62,9,211,66,62,192,211,
- 66,62,100,61,32,253,6,28,33,205,129,14,
- 66,237,163,194,253,129,6,28,33,205,129,14,
- 64,237,163,194,9,130,201,62,47,50,71,152,
- 62,47,211,68,58,203,129,237,57,20,58,204,
- 129,237,57,21,33,77,152,54,132,205,233,129,
- 58,228,149,254,209,40,6,56,4,62,0,24,
- 2,219,96,33,233,149,119,230,62,33,232,149,
- 119,213,33,8,152,17,7,0,25,119,19,25,
- 119,209,201,251,237,77,245,197,213,229,221,229,
- 205,233,129,62,1,50,106,137,205,158,139,221,
- 225,225,209,193,241,251,237,77,245,197,213,219,
- 72,237,56,16,230,46,237,57,16,237,56,12,
- 58,72,152,183,32,26,6,20,17,128,2,237,
- 56,46,187,32,35,237,56,47,186,32,29,219,
- 72,230,1,32,3,5,32,232,175,50,72,152,
- 229,221,229,62,1,50,106,137,205,158,139,221,
- 225,225,24,25,62,1,50,72,152,58,201,129,
- 237,57,12,58,202,129,237,57,13,237,56,16,
- 246,17,237,57,16,209,193,241,251,237,77,245,
- 197,229,213,221,229,237,56,16,230,17,237,57,
- 16,237,56,20,58,34,152,246,16,246,8,211,
- 68,62,6,61,32,253,58,34,152,246,8,211,
- 68,58,203,129,237,57,20,58,204,129,237,57,
- 21,237,56,16,246,34,237,57,16,221,225,209,
- 225,193,241,251,237,77,33,2,0,57,126,230,
- 3,237,100,1,40,2,246,128,230,130,245,62,
- 5,211,64,241,211,64,201,229,213,243,237,56,
- 16,230,46,237,57,16,237,56,12,251,70,35,
- 35,126,254,175,202,77,133,254,129,202,15,133,
- 230,128,194,191,132,43,58,44,152,119,33,76,
- 152,119,35,62,132,119,120,254,255,40,4,58,
- 49,152,119,219,72,43,43,112,17,3,0,237,
- 56,52,230,248,237,57,52,219,72,230,1,194,
- 141,131,209,225,237,56,52,246,6,237,57,52,
- 62,1,55,251,201,62,3,211,66,62,192,211,
- 66,62,48,211,66,0,0,219,66,230,1,40,
- 4,219,67,24,240,205,203,135,58,75,152,254,
- 255,202,128,132,58,49,152,254,161,250,207,131,
- 58,34,152,211,68,62,10,211,66,62,128,211,
- 66,62,11,211,66,62,6,211,66,24,0,62,
- 14,211,66,62,33,211,66,62,1,211,66,62,
- 64,211,66,62,3,211,66,62,209,211,66,62,
- 100,71,219,66,230,1,32,6,5,32,247,195,
- 248,132,219,67,71,58,44,152,184,194,248,132,
- 62,100,71,219,66,230,1,32,6,5,32,247,
- 195,248,132,219,67,62,100,71,219,66,230,1,
- 32,6,5,32,247,195,248,132,219,67,254,133,
- 32,7,62,0,50,74,152,24,17,254,173,32,
- 7,62,1,50,74,152,24,6,254,141,194,248,
- 132,71,209,225,58,49,152,254,132,32,10,62,
- 50,205,2,134,205,144,135,24,27,254,140,32,
- 15,62,110,205,2,134,62,141,184,32,5,205,
- 144,135,24,8,62,10,205,2,134,205,8,134,
- 62,1,50,106,137,205,158,139,237,56,52,246,
- 6,237,57,52,175,183,251,201,62,20,135,237,
- 57,20,175,237,57,21,237,56,16,246,2,237,
- 57,16,237,56,20,95,237,56,21,123,254,10,
- 48,244,237,56,16,230,17,237,57,16,209,225,
- 205,144,135,62,1,50,106,137,205,158,139,237,
- 56,52,246,6,237,57,52,175,183,251,201,209,
- 225,243,219,72,230,1,40,13,62,10,211,66,
- 0,0,219,66,230,192,202,226,132,237,56,52,
- 246,6,237,57,52,62,1,55,251,201,205,203,
- 135,62,1,50,106,137,205,158,139,237,56,52,
- 246,6,237,57,52,183,251,201,209,225,62,1,
- 50,106,137,205,158,139,237,56,52,246,6,237,
- 57,52,62,2,55,251,201,209,225,243,219,72,
- 230,1,202,213,132,62,10,211,66,0,0,219,
- 66,230,192,194,213,132,229,62,1,50,106,137,
- 42,40,152,205,65,143,225,17,3,0,205,111,
- 136,62,6,211,66,58,44,152,211,66,237,56,
- 52,246,6,237,57,52,183,251,201,209,197,237,
- 56,52,230,248,237,57,52,219,72,230,1,32,
- 15,193,225,237,56,52,246,6,237,57,52,62,
- 1,55,251,201,14,23,58,37,152,254,0,40,
- 14,14,2,254,1,32,5,62,140,119,24,3,
- 62,132,119,43,43,197,205,203,135,193,62,1,
- 211,66,62,64,211,66,62,3,211,66,62,193,
- 211,66,62,100,203,39,71,219,66,230,1,32,
- 6,5,32,247,195,229,133,33,238,151,219,67,
- 71,58,44,152,184,194,229,133,119,62,100,71,
- 219,66,230,1,32,6,5,32,247,195,229,133,
- 219,67,35,119,13,32,234,193,225,62,1,50,
- 106,137,205,158,139,237,56,52,246,6,237,57,
- 52,175,183,251,201,33,234,151,35,35,62,255,
- 119,193,225,62,1,50,106,137,205,158,139,237,
- 56,52,246,6,237,57,52,175,251,201,243,61,
- 32,253,251,201,62,3,211,66,62,192,211,66,
- 58,49,152,254,140,32,19,197,229,213,17,181,
- 129,33,185,129,1,2,0,237,176,209,225,193,
- 24,27,229,213,33,187,129,58,49,152,230,15,
- 87,30,2,237,92,25,17,181,129,126,18,19,
- 35,126,18,209,225,58,34,152,246,8,211,68,
- 58,49,152,254,165,40,14,254,164,40,10,62,
- 10,211,66,62,224,211,66,24,25,58,74,152,
- 254,0,40,10,62,10,211,66,62,160,211,66,
- 24,8,62,10,211,66,62,128,211,66,62,11,
- 211,66,62,6,211,66,205,147,143,62,5,211,
- 66,62,224,211,66,62,5,211,66,62,96,211,
- 66,62,5,61,32,253,62,5,211,66,62,224,
- 211,66,62,14,61,32,253,62,5,211,66,62,
- 233,211,66,62,128,211,66,58,181,129,61,32,
- 253,62,1,211,66,62,192,211,66,1,254,19,
- 237,56,46,187,32,6,13,32,247,195,226,134,
- 62,192,211,66,0,0,219,66,203,119,40,250,
- 219,66,203,87,40,250,243,237,56,16,230,17,
- 237,57,16,237,56,20,251,62,5,211,66,62,
- 224,211,66,58,182,129,61,32,253,229,33,181,
- 129,58,183,129,203,63,119,35,58,184,129,119,
- 225,62,10,211,66,62,224,211,66,62,11,211,
- 66,62,118,211,66,62,47,211,68,62,5,211,
- 66,62,233,211,66,58,181,129,61,32,253,62,
- 5,211,66,62,224,211,66,58,182,129,61,32,
- 253,62,5,211,66,62,96,211,66,201,229,213,
- 58,50,152,230,15,87,30,2,237,92,33,187,
- 129,25,17,181,129,126,18,35,19,126,18,209,
- 225,58,71,152,246,8,211,68,58,50,152,254,
- 165,40,14,254,164,40,10,62,10,211,66,62,
- 224,211,66,24,8,62,10,211,66,62,128,211,
- 66,62,11,211,66,62,6,211,66,195,248,135,
- 62,3,211,66,62,192,211,66,197,229,213,17,
- 181,129,33,183,129,1,2,0,237,176,209,225,
- 193,62,47,211,68,62,10,211,66,62,224,211,
- 66,62,11,211,66,62,118,211,66,62,1,211,
- 66,62,0,211,66,205,147,143,195,16,136,62,
- 3,211,66,62,192,211,66,197,229,213,17,181,
- 129,33,183,129,1,2,0,237,176,209,225,193,
- 62,47,211,68,62,10,211,66,62,224,211,66,
- 62,11,211,66,62,118,211,66,205,147,143,62,
- 5,211,66,62,224,211,66,62,5,211,66,62,
- 96,211,66,62,5,61,32,253,62,5,211,66,
- 62,224,211,66,62,14,61,32,253,62,5,211,
- 66,62,233,211,66,62,128,211,66,58,181,129,
- 61,32,253,62,1,211,66,62,192,211,66,1,
- 254,19,237,56,46,187,32,6,13,32,247,195,
- 88,136,62,192,211,66,0,0,219,66,203,119,
- 40,250,219,66,203,87,40,250,62,5,211,66,
- 62,224,211,66,58,182,129,61,32,253,62,5,
- 211,66,62,96,211,66,201,197,14,67,6,0,
- 62,3,211,66,62,192,211,66,62,48,211,66,
- 0,0,219,66,230,1,40,4,219,67,24,240,
- 62,5,211,66,62,233,211,66,62,128,211,66,
- 58,181,129,61,32,253,237,163,29,62,192,211,
- 66,219,66,230,4,40,250,237,163,29,32,245,
- 219,66,230,4,40,250,62,255,71,219,66,230,
- 4,40,3,5,32,247,219,66,230,4,40,250,
- 62,5,211,66,62,224,211,66,58,182,129,61,
- 32,253,62,5,211,66,62,96,211,66,58,71,
- 152,254,1,202,18,137,62,16,211,66,62,56,
- 211,66,62,14,211,66,62,33,211,66,62,1,
- 211,66,62,248,211,66,237,56,48,246,153,230,
- 207,237,57,48,62,3,211,66,62,221,211,66,
- 193,201,58,71,152,211,68,62,10,211,66,62,
- 128,211,66,62,11,211,66,62,6,211,66,62,
- 6,211,66,58,44,152,211,66,62,16,211,66,
- 62,56,211,66,62,48,211,66,0,0,62,14,
- 211,66,62,33,211,66,62,1,211,66,62,248,
- 211,66,237,56,48,246,145,246,8,230,207,237,
- 57,48,62,3,211,66,62,221,211,66,193,201,
- 44,3,1,0,70,69,1,245,197,213,229,175,
- 50,72,152,237,56,16,230,46,237,57,16,237,
- 56,12,62,1,211,66,0,0,219,66,95,230,
- 160,32,3,195,20,139,123,230,96,194,72,139,
- 62,48,211,66,62,1,211,66,62,64,211,66,
- 237,91,40,152,205,207,143,25,43,55,237,82,
- 218,70,139,34,42,152,98,107,58,44,152,190,
- 194,210,138,35,35,62,130,190,194,200,137,62,
- 1,50,48,152,62,175,190,202,82,139,62,132,
- 190,32,44,50,50,152,62,47,50,71,152,229,
- 175,50,106,137,42,40,152,205,65,143,225,54,
- 133,43,70,58,44,152,119,43,112,17,3,0,
- 62,10,205,2,134,205,111,136,195,158,138,62,
- 140,190,32,19,50,50,152,58,233,149,230,4,
- 202,222,138,62,1,50,71,152,195,219,137,126,
- 254,160,250,185,138,254,166,242,185,138,50,50,
- 152,43,126,35,229,213,33,234,149,95,22,0,
- 25,126,254,132,40,18,254,140,40,14,58,50,
- 152,230,15,87,126,31,21,242,65,138,56,2,
- 175,119,58,50,152,230,15,87,58,233,149,230,
- 62,31,21,242,85,138,218,98,138,209,225,195,
- 20,139,58,50,152,33,100,137,230,15,95,22,
- 0,25,126,50,71,152,209,225,58,50,152,254,
- 164,250,135,138,58,73,152,254,0,40,4,54,
- 173,24,2,54,133,43,70,58,44,152,119,43,
- 112,17,3,0,205,70,135,175,50,106,137,205,
- 208,139,58,199,129,237,57,12,58,200,129,237,
- 57,13,237,56,16,246,17,237,57,16,225,209,
- 193,241,251,237,77,62,129,190,194,227,138,54,
- 130,43,70,58,44,152,119,43,112,17,3,0,
- 205,144,135,195,20,139,35,35,126,254,132,194,
- 227,138,175,50,106,137,205,158,139,24,42,58,
- 201,154,254,1,40,7,62,1,50,106,137,24,
- 237,58,106,137,254,1,202,222,138,62,128,166,
- 194,222,138,221,229,221,33,67,152,205,127,142,
- 205,109,144,221,225,225,209,193,241,251,237,77,
- 58,106,137,254,1,202,44,139,58,50,152,254,
- 164,250,44,139,58,73,152,238,1,50,73,152,
- 221,229,221,33,51,152,205,127,142,221,225,62,
- 1,50,106,137,205,158,139,195,13,139,24,208,
- 24,206,24,204,230,64,40,3,195,20,139,195,
- 20,139,43,126,33,8,152,119,35,58,44,152,
- 119,43,237,91,35,152,205,203,135,205,158,139,
- 195,13,139,175,50,78,152,62,3,211,66,62,
- 192,211,66,201,197,33,4,0,57,126,35,102,
- 111,62,1,50,106,137,219,72,205,141,139,193,
- 201,62,1,50,78,152,34,40,152,54,0,35,
- 35,54,0,195,163,139,58,78,152,183,200,229,
- 33,181,129,58,183,129,119,35,58,184,129,119,
- 225,62,47,211,68,62,14,211,66,62,193,211,
- 66,62,10,211,66,62,224,211,66,62,11,211,
- 66,62,118,211,66,195,3,140,58,78,152,183,
- 200,58,71,152,211,68,254,69,40,4,254,70,
- 32,17,58,73,152,254,0,40,10,62,10,211,
- 66,62,160,211,66,24,8,62,10,211,66,62,
- 128,211,66,62,11,211,66,62,6,211,66,62,
- 6,211,66,58,44,152,211,66,62,16,211,66,
- 62,56,211,66,62,48,211,66,0,0,219,66,
- 230,1,40,4,219,67,24,240,62,14,211,66,
- 62,33,211,66,42,40,152,205,65,143,62,1,
- 211,66,62,248,211,66,237,56,48,246,145,246,
- 8,230,207,237,57,48,62,3,211,66,62,221,
- 211,66,201,62,16,211,66,62,56,211,66,62,
- 48,211,66,0,0,219,66,230,1,40,4,219,
- 67,24,240,62,14,211,66,62,33,211,66,62,
- 1,211,66,62,248,211,66,237,56,48,246,153,
- 230,207,237,57,48,62,3,211,66,62,221,211,
- 66,201,229,213,33,234,149,95,22,0,25,126,
- 254,132,40,4,254,140,32,2,175,119,123,209,
- 225,201,6,8,14,0,31,48,1,12,16,250,
- 121,201,33,4,0,57,94,35,86,33,2,0,
- 57,126,35,102,111,221,229,34,89,152,237,83,
- 91,152,221,33,63,152,205,127,142,58,81,152,
- 50,82,152,58,80,152,135,50,80,152,205,162,
- 140,254,3,56,16,58,81,152,135,60,230,15,
- 50,81,152,175,50,80,152,24,23,58,79,152,
- 205,162,140,254,3,48,13,58,81,152,203,63,
- 50,81,152,62,255,50,79,152,58,81,152,50,
- 82,152,58,79,152,135,50,79,152,62,32,50,
- 83,152,50,84,152,237,56,16,230,17,237,57,
- 16,219,72,62,192,50,93,152,62,93,50,94,
- 152,58,93,152,61,50,93,152,32,9,58,94,
- 152,61,50,94,152,40,44,62,170,237,57,20,
- 175,237,57,21,237,56,16,246,2,237,57,16,
- 219,72,230,1,202,29,141,237,56,20,71,237,
- 56,21,120,254,10,48,237,237,56,16,230,17,
- 237,57,16,243,62,14,211,66,62,65,211,66,
- 251,58,39,152,23,23,60,50,39,152,71,58,
- 82,152,160,230,15,40,22,71,14,10,219,66,
- 230,16,202,186,141,219,72,230,1,202,186,141,
- 13,32,239,16,235,42,89,152,237,91,91,152,
- 205,47,131,48,7,61,202,186,141,195,227,141,
- 221,225,33,0,0,201,221,33,55,152,205,127,
- 142,58,84,152,61,50,84,152,40,19,58,82,
- 152,246,1,50,82,152,58,79,152,246,1,50,
- 79,152,195,29,141,221,225,33,1,0,201,221,
- 33,59,152,205,127,142,58,80,152,246,1,50,
- 80,152,58,82,152,135,246,1,50,82,152,58,
- 83,152,61,50,83,152,194,29,141,221,225,33,
- 2,0,201,221,229,33,0,0,57,17,4,0,
- 25,126,50,44,152,230,128,50,85,152,58,85,
- 152,183,40,6,221,33,88,2,24,4,221,33,
- 150,0,58,44,152,183,40,53,60,40,50,60,
- 40,47,61,61,33,86,152,119,35,119,35,54,
- 129,175,50,48,152,221,43,221,229,225,124,181,
- 40,42,33,86,152,17,3,0,205,189,140,17,
- 232,3,27,123,178,32,251,58,48,152,183,40,
- 224,58,44,152,71,62,7,128,230,127,71,58,
- 85,152,176,50,44,152,24,162,221,225,201,183,
- 221,52,0,192,221,52,1,192,221,52,2,192,
- 221,52,3,192,55,201,245,62,1,211,100,241,
- 201,245,62,1,211,96,241,201,33,2,0,57,
- 126,35,102,111,237,56,48,230,175,237,57,48,
- 62,48,237,57,49,125,237,57,32,124,237,57,
- 33,62,0,237,57,34,62,88,237,57,35,62,
- 0,237,57,36,237,57,37,33,128,2,125,237,
- 57,38,124,237,57,39,237,56,48,246,97,230,
- 207,237,57,48,62,0,237,57,0,62,0,211,
- 96,211,100,201,33,2,0,57,126,35,102,111,
- 237,56,48,230,175,237,57,48,62,12,237,57,
- 49,62,76,237,57,32,62,0,237,57,33,237,
- 57,34,125,237,57,35,124,237,57,36,62,0,
- 237,57,37,33,128,2,125,237,57,38,124,237,
- 57,39,237,56,48,246,97,230,207,237,57,48,
- 62,1,211,96,201,33,2,0,57,126,35,102,
- 111,229,237,56,48,230,87,237,57,48,125,237,
- 57,40,124,237,57,41,62,0,237,57,42,62,
- 67,237,57,43,62,0,237,57,44,58,106,137,
- 254,1,32,5,33,6,0,24,3,33,128,2,
- 125,237,57,46,124,237,57,47,237,56,50,230,
- 252,246,2,237,57,50,225,201,33,4,0,57,
- 94,35,86,33,2,0,57,126,35,102,111,237,
- 56,48,230,87,237,57,48,125,237,57,40,124,
- 237,57,41,62,0,237,57,42,62,67,237,57,
- 43,62,0,237,57,44,123,237,57,46,122,237,
- 57,47,237,56,50,230,244,246,0,237,57,50,
- 237,56,48,246,145,230,207,237,57,48,201,213,
- 237,56,46,95,237,56,47,87,237,56,46,111,
- 237,56,47,103,183,237,82,32,235,33,128,2,
- 183,237,82,209,201,213,237,56,38,95,237,56,
- 39,87,237,56,38,111,237,56,39,103,183,237,
- 82,32,235,33,128,2,183,237,82,209,201,245,
- 197,1,52,0,237,120,230,253,237,121,193,241,
- 201,245,197,1,52,0,237,120,246,2,237,121,
- 193,241,201,33,2,0,57,126,35,102,111,126,
- 35,110,103,201,33,0,0,34,102,152,34,96,
- 152,34,98,152,33,202,154,34,104,152,237,91,
- 104,152,42,226,149,183,237,82,17,0,255,25,
- 34,100,152,203,124,40,6,33,0,125,34,100,
- 152,42,104,152,35,35,35,229,205,120,139,193,
- 201,205,186,149,229,42,40,152,35,35,35,229,
- 205,39,144,193,124,230,3,103,221,117,254,221,
- 116,255,237,91,42,152,35,35,35,183,237,82,
- 32,12,17,5,0,42,42,152,205,171,149,242,
- 169,144,42,40,152,229,205,120,139,193,195,198,
- 149,237,91,42,152,42,98,152,25,34,98,152,
- 19,19,19,42,102,152,25,34,102,152,237,91,
- 100,152,33,158,253,25,237,91,102,152,205,171,
- 149,242,214,144,33,0,0,34,102,152,62,1,
- 50,95,152,205,225,144,195,198,149,58,95,152,
- 183,200,237,91,96,152,42,102,152,205,171,149,
- 242,5,145,237,91,102,152,33,98,2,25,237,
- 91,96,152,205,171,149,250,37,145,237,91,96,
- 152,42,102,152,183,237,82,32,7,42,98,152,
- 125,180,40,13,237,91,102,152,42,96,152,205,
- 171,149,242,58,145,237,91,104,152,42,102,152,
- 25,35,35,35,229,205,120,139,193,175,50,95,
- 152,201,195,107,139,205,206,149,250,255,243,205,
- 225,144,251,58,230,149,183,194,198,149,17,1,
- 0,42,98,152,205,171,149,250,198,149,62,1,
- 50,230,149,237,91,96,152,42,104,152,25,221,
- 117,252,221,116,253,237,91,104,152,42,96,152,
- 25,35,35,35,221,117,254,221,116,255,35,35,
- 35,229,205,39,144,124,230,3,103,35,35,35,
- 221,117,250,221,116,251,235,221,110,252,221,102,
- 253,115,35,114,35,54,4,62,1,211,100,211,
- 84,195,198,149,33,0,0,34,102,152,34,96,
- 152,34,98,152,33,202,154,34,104,152,237,91,
- 104,152,42,226,149,183,237,82,17,0,255,25,
- 34,100,152,33,109,152,54,0,33,107,152,229,
- 205,240,142,193,62,47,50,34,152,62,132,50,
- 49,152,205,241,145,205,61,145,58,39,152,60,
- 50,39,152,24,241,205,206,149,251,255,33,109,
- 152,126,183,202,198,149,110,221,117,251,33,109,
- 152,54,0,221,126,251,254,1,40,28,254,3,
- 40,101,254,4,202,190,147,254,5,202,147,147,
- 254,8,40,87,33,107,152,229,205,240,142,195,
- 198,149,58,201,154,183,32,21,33,111,152,126,
- 50,229,149,205,52,144,33,110,152,110,38,0,
- 229,205,11,142,193,237,91,96,152,42,104,152,
- 25,221,117,254,221,116,255,35,35,54,2,17,
- 2,0,43,43,115,35,114,58,44,152,35,35,
- 119,58,228,149,35,119,62,1,211,100,211,84,
- 62,1,50,201,154,24,169,205,153,142,58,231,
- 149,183,40,250,175,50,231,149,33,110,152,126,
- 254,255,40,91,58,233,149,230,63,183,40,83,
- 94,22,0,33,234,149,25,126,183,40,13,33,
- 110,152,94,33,234,150,25,126,254,3,32,36,
- 205,81,148,125,180,33,110,152,94,22,0,40,
- 17,33,234,149,25,54,0,33,107,152,229,205,
- 240,142,193,195,198,149,33,234,150,25,54,0,
- 33,110,152,94,22,0,33,234,149,25,126,50,
- 49,152,254,132,32,37,62,47,50,34,152,42,
- 107,152,229,33,110,152,229,205,174,140,193,193,
- 125,180,33,110,152,94,22,0,33,234,150,202,
- 117,147,25,52,195,120,147,58,49,152,254,140,
- 32,7,62,1,50,34,152,24,210,62,32,50,
- 106,152,24,19,58,49,152,95,58,106,152,163,
- 183,58,106,152,32,11,203,63,50,106,152,58,
- 106,152,183,32,231,254,2,40,51,254,4,40,
- 38,254,8,40,26,254,16,40,13,254,32,32,
- 158,62,165,50,49,152,62,69,24,190,62,164,
- 50,49,152,62,70,24,181,62,163,50,49,152,
- 175,24,173,62,162,50,49,152,62,1,24,164,
- 62,161,50,49,152,62,3,24,155,25,54,0,
- 221,126,251,254,8,40,7,58,230,149,183,202,
- 32,146,33,107,152,229,205,240,142,193,211,84,
- 195,198,149,237,91,96,152,42,104,152,25,221,
- 117,254,221,116,255,35,35,54,6,17,2,0,
- 43,43,115,35,114,58,228,149,35,35,119,58,
- 233,149,35,119,205,146,142,195,32,146,237,91,
- 96,152,42,104,152,25,229,205,160,142,193,58,
- 231,149,183,40,250,175,50,231,149,243,237,91,
- 96,152,42,104,152,25,221,117,254,221,116,255,
- 78,35,70,221,113,252,221,112,253,89,80,42,
- 98,152,183,237,82,34,98,152,203,124,40,19,
- 33,0,0,34,98,152,34,102,152,34,96,152,
- 62,1,50,95,152,24,40,221,94,252,221,86,
- 253,19,19,19,42,96,152,25,34,96,152,237,
- 91,100,152,33,158,253,25,237,91,96,152,205,
- 171,149,242,55,148,33,0,0,34,96,152,175,
- 50,230,149,251,195,32,146,245,62,1,50,231,
- 149,62,16,237,57,0,211,80,241,251,237,77,
- 201,205,186,149,229,229,33,0,0,34,37,152,
- 33,110,152,126,50,234,151,58,44,152,33,235,
- 151,119,221,54,253,0,221,54,254,0,195,230,
- 148,33,236,151,54,175,33,3,0,229,33,234,
- 151,229,205,174,140,193,193,33,236,151,126,254,
- 255,40,74,33,245,151,110,221,117,255,33,249,
- 151,126,221,166,255,221,119,255,33,253,151,126,
- 221,166,255,221,119,255,58,232,149,95,221,126,
- 255,163,221,119,255,183,40,15,230,191,33,110,
- 152,94,22,0,33,234,149,25,119,24,12,33,
- 110,152,94,22,0,33,234,149,25,54,132,33,
- 0,0,195,198,149,221,110,253,221,102,254,35,
- 221,117,253,221,116,254,17,32,0,221,110,253,
- 221,102,254,205,171,149,250,117,148,58,233,149,
- 203,87,40,84,33,1,0,34,37,152,221,54,
- 253,0,221,54,254,0,24,53,33,236,151,54,
- 175,33,3,0,229,33,234,151,229,205,174,140,
- 193,193,33,236,151,126,254,255,40,14,33,110,
- 152,94,22,0,33,234,149,25,54,140,24,159,
- 221,110,253,221,102,254,35,221,117,253,221,116,
- 254,17,32,0,221,110,253,221,102,254,205,171,
- 149,250,12,149,33,2,0,34,37,152,221,54,
- 253,0,221,54,254,0,24,54,33,236,151,54,
- 175,33,3,0,229,33,234,151,229,205,174,140,
- 193,193,33,236,151,126,254,255,40,15,33,110,
- 152,94,22,0,33,234,149,25,54,132,195,211,
- 148,221,110,253,221,102,254,35,221,117,253,221,
- 116,254,17,32,0,221,110,253,221,102,254,205,
- 171,149,250,96,149,33,1,0,195,198,149,124,
- 170,250,179,149,237,82,201,124,230,128,237,82,
- 60,201,225,253,229,221,229,221,33,0,0,221,
- 57,233,221,249,221,225,253,225,201,233,225,253,
- 229,221,229,221,33,0,0,221,57,94,35,86,
- 35,235,57,249,235,233,0,0,0,0,0,0,
- 62,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 175,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,133,1,0,0,0,63,
- 255,255,255,255,0,0,0,63,0,0,0,0,
- 0,0,0,0,0,0,0,24,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0
- } ;
-
-#endif
diff --git a/drivers/net/appletalk/cops_ltdrv.h b/drivers/net/appletalk/cops_ltdrv.h
deleted file mode 100644
index c699b1ad31da..000000000000
--- a/drivers/net/appletalk/cops_ltdrv.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * The firmware this driver downloads into the Localtalk card is a
- * separate program and is not GPL'd source code, even though the Linux
- * side driver and the routine that loads this data into the card are.
- *
- * It is taken from the COPS SDK and is under the following license
- *
- * This material is licensed to you strictly for use in conjunction with
- * the use of COPS LocalTalk adapters.
- * There is no charge for this SDK. And no waranty express or implied
- * about its fitness for any purpose. However, we will cheerefully
- * refund every penny you paid for this SDK...
- * Regards,
- *
- * Thomas F. Divine
- * Chief Scientist
- */
-
-
-/* cops_ltdrv.h: LocalTalk driver firmware dump for Linux.
- *
- * Authors:
- * - Jay Schulist <jschlst@samba.org>
- */
-
-
-#ifdef CONFIG_COPS_TANGENT
-
-static const unsigned char ltdrv_code[] = {
- 58,3,0,50,148,10,33,143,15,62,85,119,
- 190,32,9,62,170,119,190,32,3,35,24,241,
- 34,146,10,249,17,150,10,33,143,15,183,237,
- 82,77,68,11,107,98,19,54,0,237,176,62,
- 16,237,57,51,62,0,237,57,50,237,57,54,
- 62,12,237,57,49,62,195,33,39,2,50,56,
- 0,34,57,0,237,86,205,30,2,251,205,60,
- 10,24,169,67,111,112,121,114,105,103,104,116,
- 32,40,99,41,32,49,57,56,56,45,49,57,
- 57,50,44,32,80,114,105,110,116,105,110,103,
- 32,67,111,109,109,117,110,105,99,97,116,105,
- 111,110,115,32,65,115,115,111,99,105,97,116,
- 101,115,44,32,73,110,99,46,65,108,108,32,
- 114,105,103,104,116,115,32,114,101,115,101,114,
- 118,101,100,46,32,32,4,4,22,40,255,60,
- 4,96,10,224,6,0,7,126,2,64,11,246,
- 12,6,13,0,14,193,15,0,5,96,3,192,
- 1,0,9,8,62,3,211,82,62,192,211,82,
- 201,62,3,211,82,62,213,211,82,201,62,5,
- 211,82,62,224,211,82,201,62,5,211,82,62,
- 224,211,82,201,62,5,211,82,62,96,211,82,
- 201,6,28,33,180,1,14,82,237,163,194,4,
- 2,33,39,2,34,64,0,58,3,0,230,1,
- 192,62,11,237,121,62,118,237,121,201,33,182,
- 10,54,132,205,253,1,201,245,197,213,229,42,
- 150,10,14,83,17,98,2,67,20,237,162,58,
- 179,1,95,219,82,230,1,32,6,29,32,247,
- 195,17,3,62,1,211,82,219,82,95,230,160,
- 32,10,237,162,32,225,21,32,222,195,15,3,
- 237,162,123,230,96,194,21,3,62,48,211,82,
- 62,1,211,82,175,211,82,237,91,150,10,43,
- 55,237,82,218,19,3,34,152,10,98,107,58,
- 154,10,190,32,81,62,1,50,158,10,35,35,
- 62,132,190,32,44,54,133,43,70,58,154,10,
- 119,43,112,17,3,0,205,137,3,62,16,211,
- 82,62,56,211,82,205,217,1,42,150,10,14,
- 83,17,98,2,67,20,58,178,1,95,195,59,
- 2,62,129,190,194,227,2,54,130,43,70,58,
- 154,10,119,43,112,17,3,0,205,137,3,195,
- 254,2,35,35,126,254,132,194,227,2,205,61,
- 3,24,20,62,128,166,194,222,2,221,229,221,
- 33,175,10,205,93,6,205,144,7,221,225,225,
- 209,193,241,251,237,77,221,229,221,33,159,10,
- 205,93,6,221,225,205,61,3,195,247,2,24,
- 237,24,235,24,233,230,64,40,2,24,227,24,
- 225,175,50,179,10,205,208,1,201,197,33,4,
- 0,57,126,35,102,111,205,51,3,193,201,62,
- 1,50,179,10,34,150,10,54,0,58,179,10,
- 183,200,62,14,211,82,62,193,211,82,62,10,
- 211,82,62,224,211,82,62,6,211,82,58,154,
- 10,211,82,62,16,211,82,62,56,211,82,62,
- 48,211,82,219,82,230,1,40,4,219,83,24,
- 242,62,14,211,82,62,33,211,82,62,1,211,
- 82,62,9,211,82,62,32,211,82,205,217,1,
- 201,14,83,205,208,1,24,23,14,83,205,208,
- 1,205,226,1,58,174,1,61,32,253,205,244,
- 1,58,174,1,61,32,253,205,226,1,58,175,
- 1,61,32,253,62,5,211,82,62,233,211,82,
- 62,128,211,82,58,176,1,61,32,253,237,163,
- 27,62,192,211,82,219,82,230,4,40,250,237,
- 163,27,122,179,32,243,219,82,230,4,40,250,
- 58,178,1,71,219,82,230,4,40,3,5,32,
- 247,219,82,230,4,40,250,205,235,1,58,177,
- 1,61,32,253,205,244,1,201,229,213,35,35,
- 126,230,128,194,145,4,43,58,154,10,119,43,
- 70,33,181,10,119,43,112,17,3,0,243,62,
- 10,211,82,219,82,230,128,202,41,4,209,225,
- 62,1,55,251,201,205,144,3,58,180,10,254,
- 255,202,127,4,205,217,1,58,178,1,71,219,
- 82,230,1,32,6,5,32,247,195,173,4,219,
- 83,71,58,154,10,184,194,173,4,58,178,1,
- 71,219,82,230,1,32,6,5,32,247,195,173,
- 4,219,83,58,178,1,71,219,82,230,1,32,
- 6,5,32,247,195,173,4,219,83,254,133,194,
- 173,4,58,179,1,24,4,58,179,1,135,61,
- 32,253,209,225,205,137,3,205,61,3,183,251,
- 201,209,225,243,62,10,211,82,219,82,230,128,
- 202,164,4,62,1,55,251,201,205,144,3,205,
- 61,3,183,251,201,209,225,62,2,55,251,201,
- 243,62,14,211,82,62,33,211,82,251,201,33,
- 4,0,57,94,35,86,33,2,0,57,126,35,
- 102,111,221,229,34,193,10,237,83,195,10,221,
- 33,171,10,205,93,6,58,185,10,50,186,10,
- 58,184,10,135,50,184,10,205,112,6,254,3,
- 56,16,58,185,10,135,60,230,15,50,185,10,
- 175,50,184,10,24,23,58,183,10,205,112,6,
- 254,3,48,13,58,185,10,203,63,50,185,10,
- 62,255,50,183,10,58,185,10,50,186,10,58,
- 183,10,135,50,183,10,62,32,50,187,10,50,
- 188,10,6,255,219,82,230,16,32,3,5,32,
- 247,205,180,4,6,40,219,82,230,16,40,3,
- 5,32,247,62,10,211,82,219,82,230,128,194,
- 46,5,219,82,230,16,40,214,237,95,71,58,
- 186,10,160,230,15,40,32,71,14,10,62,10,
- 211,82,219,82,230,128,202,119,5,205,180,4,
- 195,156,5,219,82,230,16,202,156,5,13,32,
- 229,16,225,42,193,10,237,91,195,10,205,252,
- 3,48,7,61,202,156,5,195,197,5,221,225,
- 33,0,0,201,221,33,163,10,205,93,6,58,
- 188,10,61,50,188,10,40,19,58,186,10,246,
- 1,50,186,10,58,183,10,246,1,50,183,10,
- 195,46,5,221,225,33,1,0,201,221,33,167,
- 10,205,93,6,58,184,10,246,1,50,184,10,
- 58,186,10,135,246,1,50,186,10,58,187,10,
- 61,50,187,10,194,46,5,221,225,33,2,0,
- 201,221,229,33,0,0,57,17,4,0,25,126,
- 50,154,10,230,128,50,189,10,58,189,10,183,
- 40,6,221,33,88,2,24,4,221,33,150,0,
- 58,154,10,183,40,49,60,40,46,61,33,190,
- 10,119,35,119,35,54,129,175,50,158,10,221,
- 43,221,229,225,124,181,40,42,33,190,10,17,
- 3,0,205,206,4,17,232,3,27,123,178,32,
- 251,58,158,10,183,40,224,58,154,10,71,62,
- 7,128,230,127,71,58,189,10,176,50,154,10,
- 24,166,221,225,201,183,221,52,0,192,221,52,
- 1,192,221,52,2,192,221,52,3,192,55,201,
- 6,8,14,0,31,48,1,12,16,250,121,201,
- 33,2,0,57,94,35,86,35,78,35,70,35,
- 126,35,102,105,79,120,68,103,237,176,201,33,
- 2,0,57,126,35,102,111,62,17,237,57,48,
- 125,237,57,40,124,237,57,41,62,0,237,57,
- 42,62,64,237,57,43,62,0,237,57,44,33,
- 128,2,125,237,57,46,124,237,57,47,62,145,
- 237,57,48,211,68,58,149,10,211,66,201,33,
- 2,0,57,126,35,102,111,62,33,237,57,48,
- 62,64,237,57,32,62,0,237,57,33,237,57,
- 34,125,237,57,35,124,237,57,36,62,0,237,
- 57,37,33,128,2,125,237,57,38,124,237,57,
- 39,62,97,237,57,48,211,67,58,149,10,211,
- 66,201,237,56,46,95,237,56,47,87,237,56,
- 46,111,237,56,47,103,183,237,82,32,235,33,
- 128,2,183,237,82,201,237,56,38,95,237,56,
- 39,87,237,56,38,111,237,56,39,103,183,237,
- 82,32,235,33,128,2,183,237,82,201,205,106,
- 10,221,110,6,221,102,7,126,35,110,103,195,
- 118,10,205,106,10,33,0,0,34,205,10,34,
- 198,10,34,200,10,33,143,15,34,207,10,237,
- 91,207,10,42,146,10,183,237,82,17,0,255,
- 25,34,203,10,203,124,40,6,33,0,125,34,
- 203,10,42,207,10,229,205,37,3,195,118,10,
- 205,106,10,229,42,150,10,35,35,35,229,205,
- 70,7,193,124,230,3,103,221,117,254,221,116,
- 255,237,91,152,10,35,35,35,183,237,82,32,
- 12,17,5,0,42,152,10,205,91,10,242,203,
- 7,42,150,10,229,205,37,3,195,118,10,237,
- 91,152,10,42,200,10,25,34,200,10,42,205,
- 10,25,34,205,10,237,91,203,10,33,158,253,
- 25,237,91,205,10,205,91,10,242,245,7,33,
- 0,0,34,205,10,62,1,50,197,10,205,5,
- 8,33,0,0,57,249,195,118,10,205,106,10,
- 58,197,10,183,202,118,10,237,91,198,10,42,
- 205,10,205,91,10,242,46,8,237,91,205,10,
- 33,98,2,25,237,91,198,10,205,91,10,250,
- 78,8,237,91,198,10,42,205,10,183,237,82,
- 32,7,42,200,10,125,180,40,13,237,91,205,
- 10,42,198,10,205,91,10,242,97,8,237,91,
- 207,10,42,205,10,25,229,205,37,3,175,50,
- 197,10,195,118,10,205,29,3,33,0,0,57,
- 249,195,118,10,205,106,10,58,202,10,183,40,
- 22,205,14,7,237,91,209,10,19,19,19,205,
- 91,10,242,139,8,33,1,0,195,118,10,33,
- 0,0,195,118,10,205,126,10,252,255,205,108,
- 8,125,180,194,118,10,237,91,200,10,33,0,
- 0,205,91,10,242,118,10,237,91,207,10,42,
- 198,10,25,221,117,254,221,116,255,35,35,35,
- 229,205,70,7,193,124,230,3,103,35,35,35,
- 221,117,252,221,116,253,229,221,110,254,221,102,
- 255,229,33,212,10,229,205,124,6,193,193,221,
- 110,252,221,102,253,34,209,10,33,211,10,54,
- 4,33,209,10,227,205,147,6,193,62,1,50,
- 202,10,243,221,94,252,221,86,253,42,200,10,
- 183,237,82,34,200,10,203,124,40,17,33,0,
- 0,34,200,10,34,205,10,34,198,10,50,197,
- 10,24,37,221,94,252,221,86,253,42,198,10,
- 25,34,198,10,237,91,203,10,33,158,253,25,
- 237,91,198,10,205,91,10,242,68,9,33,0,
- 0,34,198,10,205,5,8,33,0,0,57,249,
- 251,195,118,10,205,106,10,33,49,13,126,183,
- 40,16,205,42,7,237,91,47,13,19,19,19,
- 205,91,10,242,117,9,58,142,15,198,1,50,
- 142,15,195,118,10,33,49,13,126,254,1,40,
- 25,254,3,202,7,10,254,5,202,21,10,33,
- 49,13,54,0,33,47,13,229,205,207,6,195,
- 118,10,58,141,15,183,32,72,33,51,13,126,
- 50,149,10,205,86,7,33,50,13,126,230,127,
- 183,32,40,58,142,15,230,127,50,142,15,183,
- 32,5,198,1,50,142,15,33,50,13,126,111,
- 23,159,103,203,125,58,142,15,40,5,198,128,
- 50,142,15,33,50,13,119,33,50,13,126,111,
- 23,159,103,229,205,237,5,193,33,211,10,54,
- 2,33,2,0,34,209,10,58,154,10,33,212,
- 10,119,58,148,10,33,213,10,119,33,209,10,
- 229,205,147,6,193,24,128,42,47,13,229,33,
- 50,13,229,205,191,4,193,24,239,33,211,10,
- 54,6,33,3,0,34,209,10,58,154,10,33,
- 212,10,119,58,148,10,33,213,10,119,33,214,
- 10,54,5,33,209,10,229,205,147,6,24,200,
- 205,106,10,33,49,13,54,0,33,47,13,229,
- 205,207,6,33,209,10,227,205,147,6,193,205,
- 80,9,205,145,8,24,248,124,170,250,99,10,
- 237,82,201,124,230,128,237,82,60,201,225,253,
- 229,221,229,221,33,0,0,221,57,233,221,249,
- 221,225,253,225,201,233,225,253,229,221,229,221,
- 33,0,0,221,57,94,35,86,35,235,57,249,
- 235,233,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0
- } ;
-
-#endif
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 5e39641ea887..3a89349dc918 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -324,14 +324,12 @@ static int b53_mmap_probe(struct platform_device *pdev)
return b53_switch_register(dev);
}
-static int b53_mmap_remove(struct platform_device *pdev)
+static void b53_mmap_remove(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
if (dev)
b53_switch_remove(dev);
-
- return 0;
}
static void b53_mmap_shutdown(struct platform_device *pdev)
@@ -372,7 +370,7 @@ MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
- .remove = b53_mmap_remove,
+ .remove_new = b53_mmap_remove,
.shutdown = b53_mmap_shutdown,
.driver = {
.name = "b53-switch",
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index bcb44034404d..f3f95332ff17 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -657,17 +657,15 @@ static int b53_srab_probe(struct platform_device *pdev)
return b53_switch_register(dev);
}
-static int b53_srab_remove(struct platform_device *pdev)
+static void b53_srab_remove(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
if (!dev)
- return 0;
+ return;
b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
-
- return 0;
}
static void b53_srab_shutdown(struct platform_device *pdev)
@@ -684,7 +682,7 @@ static void b53_srab_shutdown(struct platform_device *pdev)
static struct platform_driver b53_srab_driver = {
.probe = b53_srab_probe,
- .remove = b53_srab_remove,
+ .remove_new = b53_srab_remove,
.shutdown = b53_srab_shutdown,
.driver = {
.name = "b53-srab-switch",
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 72374b066f64..0b62bd78ac50 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1537,12 +1537,12 @@ out_clk:
return ret;
}
-static int bcm_sf2_sw_remove(struct platform_device *pdev)
+static void bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
if (!priv)
- return 0;
+ return;
priv->wol_ports_mask = 0;
/* Disable interrupts */
@@ -1554,8 +1554,6 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
-
- return 0;
}
static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
@@ -1601,7 +1599,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
- .remove = bcm_sf2_sw_remove,
+ .remove_new = bcm_sf2_sw_remove,
.shutdown = bcm_sf2_sw_shutdown,
.driver = {
.name = "brcm-sf2",
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 11ef1d7ea229..beda1e9d350f 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -2060,18 +2060,16 @@ err_ptp_setup:
return ret;
}
-static int hellcreek_remove(struct platform_device *pdev)
+static void hellcreek_remove(struct platform_device *pdev)
{
struct hellcreek *hellcreek = platform_get_drvdata(pdev);
if (!hellcreek)
- return 0;
+ return;
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
-
- return 0;
}
static void hellcreek_shutdown(struct platform_device *pdev)
@@ -2107,7 +2105,7 @@ MODULE_DEVICE_TABLE(of, hellcreek_of_match);
static struct platform_driver hellcreek_driver = {
.probe = hellcreek_probe,
- .remove = hellcreek_remove,
+ .remove_new = hellcreek_remove,
.shutdown = hellcreek_shutdown,
.driver = {
.name = "hellcreek",
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 3c76a1a14aee..25abf2caf5fb 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -2207,13 +2207,13 @@ put_mdio_node:
return err;
}
-static int gswip_remove(struct platform_device *pdev)
+static void gswip_remove(struct platform_device *pdev)
{
struct gswip_priv *priv = platform_get_drvdata(pdev);
int i;
if (!priv)
- return 0;
+ return;
/* disable the switch */
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
@@ -2228,8 +2228,6 @@ static int gswip_remove(struct platform_device *pdev)
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
-
- return 0;
}
static void gswip_shutdown(struct platform_device *pdev)
@@ -2266,7 +2264,7 @@ MODULE_DEVICE_TABLE(of, gswip_of_match);
static struct platform_driver gswip_driver = {
.probe = gswip_probe,
- .remove = gswip_remove,
+ .remove_new = gswip_remove,
.shutdown = gswip_shutdown,
.driver = {
.name = "gswip",
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 48360cc9fc68..49459a50dbc8 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
ksz_switch-objs := ksz_common.o
-ksz_switch-objs += ksz9477.o
+ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o
ksz_switch-objs += ksz8795.o
ksz_switch-objs += lan937x_main.o
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 7a57c6088f80..3c9dae53e4d8 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -323,13 +323,6 @@
((addr) + REG_PORT_1_CTRL_0 + (port) * \
(REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0))
-#define REG_SW_MAC_ADDR_0 0x68
-#define REG_SW_MAC_ADDR_1 0x69
-#define REG_SW_MAC_ADDR_2 0x6A
-#define REG_SW_MAC_ADDR_3 0x6B
-#define REG_SW_MAC_ADDR_4 0x6C
-#define REG_SW_MAC_ADDR_5 0x6D
-
#define TABLE_EXT_SELECT_S 5
#define TABLE_EEE_V 1
#define TABLE_ACL_V 2
@@ -442,20 +435,6 @@
#define TOS_PRIO_M KS_PRIO_M
#define TOS_PRIO_S KS_PRIO_S
-#define REG_SW_CTRL_20 0xA3
-
-#define SW_GMII_DRIVE_STRENGTH_S 4
-#define SW_DRIVE_STRENGTH_M 0x7
-#define SW_DRIVE_STRENGTH_2MA 0
-#define SW_DRIVE_STRENGTH_4MA 1
-#define SW_DRIVE_STRENGTH_8MA 2
-#define SW_DRIVE_STRENGTH_12MA 3
-#define SW_DRIVE_STRENGTH_16MA 4
-#define SW_DRIVE_STRENGTH_20MA 5
-#define SW_DRIVE_STRENGTH_24MA 6
-#define SW_DRIVE_STRENGTH_28MA 7
-#define SW_MII_DRIVE_STRENGTH_S 0
-
#define REG_SW_CTRL_21 0xA4
#define SW_IPV6_MLD_OPTION BIT(3)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 83b7f2d5c1ea..cde8ef33d029 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1004,6 +1004,8 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* clear pending interrupts */
if (dev->info->internal_phy[port])
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
+
+ ksz9477_port_acl_init(dev, port);
}
void ksz9477_config_cpu_port(struct dsa_switch *ds)
@@ -1141,6 +1143,83 @@ int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
}
+/* The KSZ9477 provides the following HW features to accelerate
+ * HSR frame handling:
+ *
+ * 1. TX PACKET DUPLICATION FROM HOST TO SWITCH
+ * 2. RX PACKET DUPLICATION DISCARDING
+ * 3. PREVENTING PACKET LOOP IN THE RING BY SELF-ADDRESS FILTERING
+ *
+ * Only the feature from point 1 has a NETIF_F* flag available.
+ *
+ * The features from points 2 and 3 are "best effort" - i.e. they will
+ * work correctly most of the time, but it may happen that some
+ * frames will not be caught - to be more specific: there is a race
+ * condition in the hardware such that, when duplicate packets are
+ * received on member ports very close in time to each other, the
+ * hardware fails to detect that they are duplicates.
+ *
+ * Hence, the SW needs to handle those special cases. However, the
+ * speed-up gain is considerable when the above features are used.
+ *
+ * Moreover, the NETIF_F_HW_HSR_FWD feature is also enabled, as HSR frames
+ * can be forwarded in the switch fabric between HSR ports.
+ */
+#define KSZ9477_SUPPORTED_HSR_FEATURES (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_FWD)
+
+void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
+{
+ struct ksz_device *dev = ds->priv;
+ struct net_device *slave;
+ struct dsa_port *hsr_dp;
+ u8 data, hsr_ports = 0;
+
+ /* Program which port(s) shall support HSR */
+ ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), BIT(port));
+
+ /* Forward frames between HSR ports (i.e. bridge together HSR ports) */
+ if (dev->hsr_ports) {
+ dsa_hsr_foreach_port(hsr_dp, ds, hsr)
+ hsr_ports |= BIT(hsr_dp->index);
+
+ hsr_ports |= BIT(dsa_upstream_port(ds, port));
+ dsa_hsr_foreach_port(hsr_dp, ds, hsr)
+ ksz9477_cfg_port_member(dev, hsr_dp->index, hsr_ports);
+ }
+
+ if (!dev->hsr_ports) {
+ /* Enable discarding of received HSR frames */
+ ksz_read8(dev, REG_HSR_ALU_CTRL_0__1, &data);
+ data |= HSR_DUPLICATE_DISCARD;
+ data &= ~HSR_NODE_UNICAST;
+ ksz_write8(dev, REG_HSR_ALU_CTRL_0__1, data);
+ }
+
+ /* Enable per port self-address filtering.
+ * The global self-address filtering has already been enabled in the
+ * ksz9477_reset_switch() function.
+ */
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true);
+
+	/* Set up HW-supported features for LAN HSR ports */
+ slave = dsa_to_port(ds, port)->slave;
+ slave->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
+}
+
+void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr)
+{
+ struct ksz_device *dev = ds->priv;
+
+ /* Clear port HSR support */
+ ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), 0);
+
+ /* Disable forwarding frames between HSR ports */
+ ksz9477_cfg_port_member(dev, port, BIT(dsa_upstream_port(ds, port)));
+
+ /* Disable per port self-address filtering */
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, false);
+}
+
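A minimal sketch (not part of this patch) of how these two callbacks could be
hooked up, assuming the upstream struct dsa_switch_ops, where both HSR
callbacks return int; in this driver the real wiring goes through the shared
ops table in ksz_common.c:

	static int example_port_hsr_join(struct dsa_switch *ds, int port,
					 struct net_device *hsr)
	{
		/* illustrative wrapper: the void helper cannot fail */
		ksz9477_hsr_join(ds, port, hsr);

		return 0;
	}

	static int example_port_hsr_leave(struct dsa_switch *ds, int port,
					  struct net_device *hsr)
	{
		ksz9477_hsr_leave(ds, port, hsr);

		return 0;
	}

	static const struct dsa_switch_ops example_switch_ops = {
		.port_hsr_join	= example_port_hsr_join,
		.port_hsr_leave	= example_port_hsr_leave,
	};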
int ksz9477_switch_init(struct ksz_device *dev)
{
u8 data8;
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index a6f425866a29..f90e2e8ebe80 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -56,5 +56,43 @@ int ksz9477_reset_switch(struct ksz_device *dev);
int ksz9477_switch_init(struct ksz_device *dev);
void ksz9477_switch_exit(struct ksz_device *dev);
void ksz9477_port_queue_split(struct ksz_device *dev, int port);
+void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr);
+void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr);
+
+int ksz9477_port_acl_init(struct ksz_device *dev, int port);
+void ksz9477_port_acl_free(struct ksz_device *dev, int port);
+int ksz9477_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+int ksz9477_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+
+#define KSZ9477_ACL_ENTRY_SIZE 18
+#define KSZ9477_ACL_MAX_ENTRIES 16
+
+struct ksz9477_acl_entry {
+ u8 entry[KSZ9477_ACL_ENTRY_SIZE];
+ unsigned long cookie;
+ u32 prio;
+};
+
+struct ksz9477_acl_entries {
+ struct ksz9477_acl_entry entries[KSZ9477_ACL_MAX_ENTRIES];
+ int entries_count;
+};
+
+struct ksz9477_acl_priv {
+ struct ksz9477_acl_entries acles;
+};
+
+void ksz9477_acl_remove_entries(struct ksz_device *dev, int port,
+ struct ksz9477_acl_entries *acles,
+ unsigned long cookie);
+int ksz9477_acl_write_list(struct ksz_device *dev, int port);
+int ksz9477_sort_acl_entries(struct ksz_device *dev, int port);
+void ksz9477_acl_action_rule_cfg(u8 *entry, bool force_prio, u8 prio_val);
+void ksz9477_acl_processing_rule_set_action(u8 *entry, u8 action_idx);
+void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port,
+ u16 ethtype, u8 *src_mac, u8 *dst_mac,
+ unsigned long cookie, u32 prio);
#endif
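Taken together, these declarations suggest the flow an add path would follow;
a hedged sketch only (the real tc flower glue lives in ksz9477_tc_flower.c
and may differ):

	/* illustrative only: parameters mirror the declarations above */
	static int example_acl_add_l2_rule(struct ksz_device *dev, int port,
					   u16 ethtype, u8 *src_mac,
					   u8 *dst_mac, unsigned long cookie,
					   u32 prio)
	{
		int ret;

		/* build an L2 match, keep the list sorted by priority,
		 * then push the whole list to the hardware
		 */
		ksz9477_acl_match_process_l2(dev, port, ethtype, src_mac,
					     dst_mac, cookie, prio);

		ret = ksz9477_sort_acl_entries(dev, port);
		if (ret)
			return ret;

		return ksz9477_acl_write_list(dev, port);
	}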
diff --git a/drivers/net/dsa/microchip/ksz9477_acl.c b/drivers/net/dsa/microchip/ksz9477_acl.c
new file mode 100644
index 000000000000..06d74c19eb94
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477_acl.c
@@ -0,0 +1,1436 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+
+/* Access Control List (ACL) structure:
+ *
+ * There are multiple groups of registers involved in ACL configuration:
+ *
+ * - Matching Rules: These registers define the criteria for matching incoming
+ * packets based on their header information (Layer 2 MAC, Layer 3 IP, or
+ * Layer 4 TCP/UDP). Different register settings are used depending on the
+ * matching rule mode (MD) and the Enable (ENB) settings.
+ *
+ * - Action Rules: These registers define how the ACL should modify the packet's
+ * priority, VLAN tag priority, and forwarding map once a matching rule has
+ * been triggered. The settings vary depending on whether the matching rule is
+ * in Count Mode (MD = 01 and ENB = 00) or not.
+ *
+ * - Processing Rules: These registers control the overall behavior of the ACL,
+ * such as selecting which matching rule to apply first, enabling/disabling
+ * specific rules, or specifying actions for matched packets.
+ *
+ * ACL Structure:
+ * +----------------------+
+ * +----------------------+ | (optional) |
+ * | Matching Rules | | Matching Rules |
+ * | (Layer 2, 3, 4) | | (Layer 2, 3, 4) |
+ * +----------------------+ +----------------------+
+ * | |
+ * \___________________________/
+ * v
+ * +----------------------+
+ * | Processing Rules |
+ * | (action idx, |
+ * | matching rule set) |
+ * +----------------------+
+ * |
+ * v
+ * +----------------------+
+ * | Action Rules |
+ * | (Modify Priority, |
+ * | Forwarding Map, |
+ * | VLAN tag, etc) |
+ * +----------------------+
+ */
+
+#include <linux/bitops.h>
+
+#include "ksz9477.h"
+#include "ksz9477_reg.h"
+#include "ksz_common.h"
+
+#define KSZ9477_PORT_ACL_0 0x600
+
+enum ksz9477_acl_port_access {
+ KSZ9477_ACL_PORT_ACCESS_0 = 0x00,
+ KSZ9477_ACL_PORT_ACCESS_1 = 0x01,
+ KSZ9477_ACL_PORT_ACCESS_2 = 0x02,
+ KSZ9477_ACL_PORT_ACCESS_3 = 0x03,
+ KSZ9477_ACL_PORT_ACCESS_4 = 0x04,
+ KSZ9477_ACL_PORT_ACCESS_5 = 0x05,
+ KSZ9477_ACL_PORT_ACCESS_6 = 0x06,
+ KSZ9477_ACL_PORT_ACCESS_7 = 0x07,
+ KSZ9477_ACL_PORT_ACCESS_8 = 0x08,
+ KSZ9477_ACL_PORT_ACCESS_9 = 0x09,
+ KSZ9477_ACL_PORT_ACCESS_A = 0x0A,
+ KSZ9477_ACL_PORT_ACCESS_B = 0x0B,
+ KSZ9477_ACL_PORT_ACCESS_C = 0x0C,
+ KSZ9477_ACL_PORT_ACCESS_D = 0x0D,
+ KSZ9477_ACL_PORT_ACCESS_E = 0x0E,
+ KSZ9477_ACL_PORT_ACCESS_F = 0x0F,
+ KSZ9477_ACL_PORT_ACCESS_10 = 0x10,
+ KSZ9477_ACL_PORT_ACCESS_11 = 0x11
+};
+
+#define KSZ9477_ACL_MD_MASK GENMASK(5, 4)
+#define KSZ9477_ACL_MD_DISABLE 0
+#define KSZ9477_ACL_MD_L2_MAC 1
+#define KSZ9477_ACL_MD_L3_IP 2
+#define KSZ9477_ACL_MD_L4_TCP_UDP 3
+
+#define KSZ9477_ACL_ENB_MASK GENMASK(3, 2)
+#define KSZ9477_ACL_ENB_L2_COUNTER 0
+#define KSZ9477_ACL_ENB_L2_TYPE 1
+#define KSZ9477_ACL_ENB_L2_MAC 2
+#define KSZ9477_ACL_ENB_L2_MAC_TYPE 3
+
+/* only IPv4 src or dst can be used with mask */
+#define KSZ9477_ACL_ENB_L3_IPV4_ADDR_MASK 1
+/* only IPv4 src and dst can be used without mask */
+#define KSZ9477_ACL_ENB_L3_IPV4_ADDR_SRC_DST 2
+
+#define KSZ9477_ACL_ENB_L4_IP_PROTO 0
+#define KSZ9477_ACL_ENB_L4_TCP_SRC_DST_PORT 1
+#define KSZ9477_ACL_ENB_L4_UDP_SRC_DST_PORT 2
+#define KSZ9477_ACL_ENB_L4_TCP_SEQ_NUMBER 3
+
+#define KSZ9477_ACL_SD_SRC BIT(1)
+#define KSZ9477_ACL_SD_DST 0
+#define KSZ9477_ACL_EQ_EQUAL BIT(0)
+#define KSZ9477_ACL_EQ_NOT_EQUAL 0
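As an illustration (composed only from the masks above; not code from this
file), the mode/enable byte of an L2 source-MAC "equal" matching rule would
be built as:

	u8 val1 = FIELD_PREP(KSZ9477_ACL_MD_MASK, KSZ9477_ACL_MD_L2_MAC) |
		  FIELD_PREP(KSZ9477_ACL_ENB_MASK, KSZ9477_ACL_ENB_L2_MAC) |
		  KSZ9477_ACL_SD_SRC | KSZ9477_ACL_EQ_EQUAL;	/* 0x1b */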
+
+#define KSZ9477_ACL_PM_M GENMASK(7, 6)
+#define KSZ9477_ACL_PM_DISABLE 0
+#define KSZ9477_ACL_PM_HIGHER 1
+#define KSZ9477_ACL_PM_LOWER 2
+#define KSZ9477_ACL_PM_REPLACE 3
+#define KSZ9477_ACL_P_M GENMASK(5, 3)
+
+#define KSZ9477_PORT_ACL_CTRL_0 0x0612
+
+#define KSZ9477_ACL_WRITE_DONE BIT(6)
+#define KSZ9477_ACL_READ_DONE BIT(5)
+#define KSZ9477_ACL_WRITE BIT(4)
+#define KSZ9477_ACL_INDEX_M GENMASK(3, 0)
+
+/**
+ * ksz9477_dump_acl_index - Print the ACL entry at the specified index
+ *
+ * @dev: Pointer to the ksz9477 device structure.
+ * @acle: Pointer to the ACL entry array.
+ * @index: The index of the ACL entry to print.
+ *
+ * This function prints the details of an ACL entry, located at a particular
+ * index within the ksz9477 device's ACL table. It omits printing entries that
+ * are empty.
+ *
+ * Return: 1 if the entry is non-empty and printed, 0 otherwise.
+ */
+static int ksz9477_dump_acl_index(struct ksz_device *dev,
+ struct ksz9477_acl_entry *acle, int index)
+{
+ bool empty = true;
+ char buf[64];
+ u8 *entry;
+ int i;
+
+ entry = &acle[index].entry[0];
+ for (i = 0; i <= KSZ9477_ACL_PORT_ACCESS_11; i++) {
+ if (entry[i])
+ empty = false;
+
+ sprintf(buf + (i * 3), "%02x ", entry[i]);
+ }
+
+ /* no need to print empty entries */
+ if (empty)
+ return 0;
+
+ dev_err(dev->dev, " Entry %02d, prio: %02d : %s", index,
+ acle[index].prio, buf);
+
+ return 1;
+}
+
+/**
+ * ksz9477_dump_acl - Print ACL entries
+ *
+ * @dev: Pointer to the device structure.
+ * @acle: Pointer to the ACL entry array.
+ */
+static void ksz9477_dump_acl(struct ksz_device *dev,
+ struct ksz9477_acl_entry *acle)
+{
+ int count = 0;
+ int i;
+
+ for (i = 0; i < KSZ9477_ACL_MAX_ENTRIES; i++)
+ count += ksz9477_dump_acl_index(dev, acle, i);
+
+ if (count != KSZ9477_ACL_MAX_ENTRIES - 1)
+ dev_err(dev->dev, " Empty ACL entries were skipped\n");
+}
+
+/**
+ * ksz9477_acl_is_valid_matching_rule - Check if an ACL entry contains a valid
+ * matching rule.
+ *
+ * @entry: Pointer to ACL entry buffer
+ *
+ * This function checks if the given ACL entry buffer contains a valid
+ * matching rule by inspecting the Mode (MD) and Enable (ENB) fields.
+ *
+ * Returns: True if it's a valid matching rule, false otherwise.
+ */
+static bool ksz9477_acl_is_valid_matching_rule(u8 *entry)
+{
+ u8 val1, md, enb;
+
+ val1 = entry[KSZ9477_ACL_PORT_ACCESS_1];
+
+ md = FIELD_GET(KSZ9477_ACL_MD_MASK, val1);
+ if (md == KSZ9477_ACL_MD_DISABLE)
+ return false;
+
+ if (md == KSZ9477_ACL_MD_L2_MAC) {
+		/* L2 counter is not supported, so it is not a valid rule for now */
+ enb = FIELD_GET(KSZ9477_ACL_ENB_MASK, val1);
+ if (enb == KSZ9477_ACL_ENB_L2_COUNTER)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ksz9477_acl_get_cont_entr - Get count of contiguous ACL entries and validate
+ * the matching rules.
+ * @dev: Pointer to the KSZ9477 device structure.
+ * @port: Port number.
+ * @index: Index of the starting ACL entry.
+ *
+ * Based on the KSZ9477 switch's Access Control List (ACL) system, the RuleSet
+ * in an ACL entry indicates which entries contain Matching rules linked to it.
+ * This RuleSet is represented by two registers: KSZ9477_ACL_PORT_ACCESS_E and
+ * KSZ9477_ACL_PORT_ACCESS_F. Each bit set in these registers corresponds to
+ * an entry containing a Matching rule for this RuleSet.
+ *
+ * For a single Matching rule linked, only one bit is set. However, when an
+ * entry links multiple Matching rules, forming what's termed a 'complex rule',
+ * multiple bits are set in these registers.
+ *
+ * This function checks that, for complex rules, the entries containing the
+ * linked Matching rules are contiguous in terms of their indices. It calculates
+ * and returns the number of these contiguous entries.
+ *
+ * Returns:
+ * - 0 if the entry is empty and can be safely overwritten
+ * - 1 if the entry represents a simple rule
+ * - The number of contiguous entries if it is the root entry of a complex
+ * rule
+ * - -ENOTEMPTY if the entry is part of a complex rule but not the root
+ * entry
+ * - -EINVAL if the validation fails
+ */
+static int ksz9477_acl_get_cont_entr(struct ksz_device *dev, int port,
+ int index)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ int start_idx, end_idx, contiguous_count;
+ unsigned long val;
+ u8 vale, valf;
+ u8 *entry;
+ int i;
+
+ entry = &acles->entries[index].entry[0];
+ vale = entry[KSZ9477_ACL_PORT_ACCESS_E];
+ valf = entry[KSZ9477_ACL_PORT_ACCESS_F];
+
+ val = (vale << 8) | valf;
+
+ /* If no bits are set, return an appropriate value or error */
+ if (!val) {
+ if (ksz9477_acl_is_valid_matching_rule(entry)) {
+ /* Looks like we are about to corrupt some complex rule.
+ * Do not print an error here, as this is a normal case
+ * when we are trying to find a free or starting entry.
+ */
+			dev_dbg(dev->dev, "ACL: entry %d starts with a valid matching rule, but no bits are set in RuleSet\n",
+ index);
+ return -ENOTEMPTY;
+ }
+
+ /* This entry does not contain a valid matching rule */
+ return 0;
+ }
+
+ start_idx = find_first_bit((unsigned long *)&val, 16);
+ end_idx = find_last_bit((unsigned long *)&val, 16);
+
+ /* Calculate the contiguous count */
+ contiguous_count = end_idx - start_idx + 1;
+
+ /* Check if the number of bits set in val matches our calculated count */
+ if (contiguous_count != hweight16(val)) {
+ /* Probably we have a fragmented complex rule, which is not
+ * supported by this driver.
+ */
+ dev_err(dev->dev, "ACL: number of bits set in RuleSet does not match calculated count\n");
+ return -EINVAL;
+ }
+
+ /* loop over the contiguous entries and check for valid matching rules */
+ for (i = start_idx; i <= end_idx; i++) {
+ u8 *current_entry = &acles->entries[i].entry[0];
+
+ if (!ksz9477_acl_is_valid_matching_rule(current_entry)) {
+			/* we have something linked without a valid matching
+			 * rule. Is the ACL table corrupted?
+ */
+ dev_err(dev->dev, "ACL: entry %d does not contain a valid matching rule\n",
+ i);
+ return -EINVAL;
+ }
+
+ if (i > start_idx) {
+ vale = current_entry[KSZ9477_ACL_PORT_ACCESS_E];
+ valf = current_entry[KSZ9477_ACL_PORT_ACCESS_F];
+ /* Following entry should have empty linkage list */
+ if (vale || valf) {
+ dev_err(dev->dev, "ACL: entry %d has non-empty RuleSet linkage\n",
+ i);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return contiguous_count;
+}
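The contiguity test above boils down to a simple bitmap property; a
self-contained sketch with the same semantics (illustrative only):

	/* A RuleSet bitmap is contiguous iff its population count equals
	 * the span between its first and last set bits: 0x0070 (bits 4-6)
	 * passes, 0x0050 (bits 4 and 6) does not.
	 */
	static bool example_ruleset_is_contiguous(u16 map)
	{
		unsigned long val = map;

		if (!map)
			return false;

		return hweight16(map) ==
		       find_last_bit(&val, 16) - find_first_bit(&val, 16) + 1;
	}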
+
+/**
+ * ksz9477_acl_update_linkage - Update the RuleSet linkage for an ACL entry
+ * after a move operation.
+ *
+ * @dev: Pointer to the ksz_device.
+ * @entry: Pointer to the ACL entry array.
+ * @old_idx: The original index of the ACL entry before moving.
+ * @new_idx: The new index of the ACL entry after moving.
+ *
+ * This function updates the RuleSet linkage bits for an ACL entry when
+ * it's moved from one position to another in the ACL table. The RuleSet
+ * linkage is represented by two 8-bit registers, which are combined
+ * into a 16-bit value for easier manipulation. The linkage bits are shifted
+ * based on the difference between the old and new index. If any bits are lost
+ * during the shift operation, an error is returned.
+ *
+ * Note: Fragmentation within a RuleSet is not supported. Hence, entries must
+ * be moved as complete blocks, maintaining the integrity of the RuleSet.
+ *
+ * Returns: 0 on success, or -EINVAL if any RuleSet linkage bits are lost
+ * during the move.
+ */
+static int ksz9477_acl_update_linkage(struct ksz_device *dev, u8 *entry,
+ u16 old_idx, u16 new_idx)
+{
+ unsigned int original_bit_count;
+ unsigned long rule_linkage;
+ u8 vale, valf, val0;
+ int shift;
+
+ val0 = entry[KSZ9477_ACL_PORT_ACCESS_0];
+ vale = entry[KSZ9477_ACL_PORT_ACCESS_E];
+ valf = entry[KSZ9477_ACL_PORT_ACCESS_F];
+
+ /* Combine the two u8 values into one u16 for easier manipulation */
+ rule_linkage = (vale << 8) | valf;
+ original_bit_count = hweight16(rule_linkage);
+
+	/* Even if the HW is able to handle a fragmented RuleSet, we don't
+	 * support it. The RuleSet is filled only for the first entry of the
+	 * set.
+ */
+ if (!rule_linkage)
+ return 0;
+
+ if (val0 != old_idx) {
+ dev_err(dev->dev, "ACL: entry %d has unexpected ActionRule linkage: %d\n",
+ old_idx, val0);
+ return -EINVAL;
+ }
+
+ val0 = new_idx;
+
+ /* Calculate the number of positions to shift */
+ shift = new_idx - old_idx;
+
+ /* Shift the RuleSet */
+ if (shift > 0)
+ rule_linkage <<= shift;
+ else
+ rule_linkage >>= -shift;
+
+ /* Check that no bits were lost in the process */
+ if (original_bit_count != hweight16(rule_linkage)) {
+ dev_err(dev->dev, "ACL RuleSet linkage bits lost during move\n");
+ return -EINVAL;
+ }
+
+ entry[KSZ9477_ACL_PORT_ACCESS_0] = val0;
+
+ /* Update the RuleSet bitfields in the entry */
+ entry[KSZ9477_ACL_PORT_ACCESS_E] = (rule_linkage >> 8) & 0xFF;
+ entry[KSZ9477_ACL_PORT_ACCESS_F] = rule_linkage & 0xFF;
+
+ return 0;
+}
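A worked example of the shift logic above (values chosen for illustration):
a two-entry RuleSet rooted at old_idx = 3 carries rule_linkage 0x0018
(bits 3 and 4); moving it to new_idx = 5 gives shift = 2, so the linkage
becomes 0x0018 << 2 = 0x0060 (bits 5 and 6), and hweight16() still counts
two bits, confirming no linkage was lost in the move.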
+
+/**
+ * ksz9477_validate_and_get_src_count - Validate source and destination indices
+ * and determine the source entry count.
+ * @dev: Pointer to the KSZ device structure.
+ * @port: Port number on the KSZ device where the ACL entries reside.
+ * @src_idx: Index of the starting ACL entry that needs to be validated.
+ * @dst_idx: Index of the destination where the source entries are intended to
+ * be moved.
+ * @src_count: Pointer to the variable that will hold the number of contiguous
+ * source entries if the validation passes.
+ * @dst_count: Pointer to the variable that will hold the number of contiguous
+ * destination entries if the validation passes.
+ *
+ * This function performs validation on the source and destination indices
+ * provided for ACL entries. It checks if the indices are within the valid
+ * range, and if the source entries are contiguous. Additionally, the function
+ * ensures that there's adequate space at the destination for the source entries
+ * and that the destination index isn't in the middle of a RuleSet. If all
+ * validations pass, the function returns the number of contiguous source and
+ * destination entries.
+ *
+ * Return: 0 on success, otherwise returns a negative error code if any
+ * validation check fails.
+ */
+static int ksz9477_validate_and_get_src_count(struct ksz_device *dev, int port,
+ int src_idx, int dst_idx,
+ int *src_count, int *dst_count)
+{
+ int ret;
+
+ if (src_idx >= KSZ9477_ACL_MAX_ENTRIES ||
+ dst_idx >= KSZ9477_ACL_MAX_ENTRIES) {
+ dev_err(dev->dev, "ACL: invalid entry index\n");
+ return -EINVAL;
+ }
+
+ /* Nothing to do */
+ if (src_idx == dst_idx)
+ return 0;
+
+ /* Validate if the source entries are contiguous */
+ ret = ksz9477_acl_get_cont_entr(dev, port, src_idx);
+ if (ret < 0)
+ return ret;
+ *src_count = ret;
+
+ if (!*src_count) {
+ dev_err(dev->dev, "ACL: source entry is empty\n");
+ return -EINVAL;
+ }
+
+ if (dst_idx + *src_count >= KSZ9477_ACL_MAX_ENTRIES) {
+ dev_err(dev->dev, "ACL: Not enough space at the destination. Move operation will fail.\n");
+ return -EINVAL;
+ }
+
+ /* Validate if the destination entry is empty or not in the middle of
+ * a RuleSet.
+ */
+ ret = ksz9477_acl_get_cont_entr(dev, port, dst_idx);
+ if (ret < 0)
+ return ret;
+ *dst_count = ret;
+
+ return 0;
+}
+
+/**
+ * ksz9477_move_entries_downwards - Move a range of ACL entries downwards in
+ * the list.
+ * @dev: Pointer to the KSZ device structure.
+ * @acles: Pointer to the structure encapsulating all the ACL entries.
+ * @start_idx: Starting index of the entries to be relocated.
+ * @num_entries_to_move: Number of consecutive entries to be relocated.
+ * @end_idx: Destination index where the first entry should be situated post
+ * relocation.
+ *
+ * This function is responsible for rearranging a specific block of ACL entries
+ * by shifting them downwards in the list based on the supplied source and
+ * destination indices. It ensures that the linkage between the ACL entries is
+ * maintained accurately after the relocation.
+ *
+ * Return: 0 on successful relocation of entries, otherwise returns a negative
+ * error code.
+ */
+static int ksz9477_move_entries_downwards(struct ksz_device *dev,
+ struct ksz9477_acl_entries *acles,
+ u16 start_idx,
+ u16 num_entries_to_move,
+ u16 end_idx)
+{
+ struct ksz9477_acl_entry *e;
+ int ret, i;
+
+ for (i = start_idx; i < end_idx; i++) {
+ e = &acles->entries[i];
+ *e = acles->entries[i + num_entries_to_move];
+
+ ret = ksz9477_acl_update_linkage(dev, &e->entry[0],
+ i + num_entries_to_move, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz9477_move_entries_upwards - Move a range of ACL entries upwards in the
+ * list.
+ * @dev: Pointer to the KSZ device structure.
+ * @acles: Pointer to the structure holding all the ACL entries.
+ * @start_idx: The starting index of the entries to be moved.
+ * @num_entries_to_move: Number of contiguous entries to be moved.
+ * @target_idx: The destination index where the first entry should be placed
+ * after moving.
+ *
+ * This function rearranges a chunk of ACL entries by moving them upwards
+ * in the list based on the given source and destination indices. The reordering
+ * process preserves the linkage between entries by updating it accordingly.
+ *
+ * Return: 0 if the entries were successfully moved, otherwise a negative error
+ * code.
+ */
+static int ksz9477_move_entries_upwards(struct ksz_device *dev,
+ struct ksz9477_acl_entries *acles,
+ u16 start_idx, u16 num_entries_to_move,
+ u16 target_idx)
+{
+ struct ksz9477_acl_entry *e;
+ int ret, i, b;
+
+ for (i = start_idx; i > target_idx; i--) {
+ b = i + num_entries_to_move - 1;
+
+ e = &acles->entries[b];
+ *e = acles->entries[i - 1];
+
+ ret = ksz9477_acl_update_linkage(dev, &e->entry[0], i - 1, b);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz9477_acl_move_entries - Move a block of contiguous ACL entries from a
+ * source to a destination index.
+ * @dev: Pointer to the KSZ9477 device structure.
+ * @port: Port number.
+ * @src_idx: Index of the starting source ACL entry.
+ * @dst_idx: Index of the starting destination ACL entry.
+ *
+ * This function aims to move a block of contiguous ACL entries from the source
+ * index to the destination index while ensuring the integrity and validity of
+ * the ACL table.
+ *
+ * In case of any errors during the adjustments or copying, the function will
+ * restore the ACL entries to their original state from the backup.
+ *
+ * Return: 0 if the move operation is successful. Returns -EINVAL for validation
+ * errors or other error codes based on specific failure conditions.
+ */
+static int ksz9477_acl_move_entries(struct ksz_device *dev, int port,
+ u16 src_idx, u16 dst_idx)
+{
+ struct ksz9477_acl_entry buffer[KSZ9477_ACL_MAX_ENTRIES];
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ int src_count, ret, dst_count;
+
+ ret = ksz9477_validate_and_get_src_count(dev, port, src_idx, dst_idx,
+ &src_count, &dst_count);
+ if (ret)
+ return ret;
+
+	/* In case dst_idx is greater than src_idx, we need to adjust the
+	 * destination index to account for the entries that will be moved
+	 * downwards and the size of the entry located at dst_idx.
+ */
+ if (dst_idx > src_idx)
+ dst_idx = dst_idx + dst_count - src_count;
+
+ /* Copy source block to buffer and update its linkage */
+ for (int i = 0; i < src_count; i++) {
+ buffer[i] = acles->entries[src_idx + i];
+ ret = ksz9477_acl_update_linkage(dev, &buffer[i].entry[0],
+ src_idx + i, dst_idx + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Adjust other entries and their linkage based on destination */
+ if (dst_idx > src_idx) {
+ ret = ksz9477_move_entries_downwards(dev, acles, src_idx,
+ src_count, dst_idx);
+ } else {
+ ret = ksz9477_move_entries_upwards(dev, acles, src_idx,
+ src_count, dst_idx);
+ }
+ if (ret < 0)
+ return ret;
+
+ /* Copy buffer to destination block */
+ for (int i = 0; i < src_count; i++)
+ acles->entries[dst_idx + i] = buffer[i];
+
+ return 0;
+}
+
+/**
+ * ksz9477_get_next_block_start - Identify the starting index of the next ACL
+ * block.
+ * @dev: Pointer to the device structure.
+ * @port: The port number on which the ACL entries are being checked.
+ * @start: The starting index from which the search begins.
+ *
+ * This function looks for the next valid ACL block starting from the provided
+ * 'start' index and returns the beginning index of that block. If the block is
+ * invalid or if it reaches the end of the ACL entries without finding another
+ * block, it returns the maximum ACL entries count.
+ *
+ * Returns:
+ * - The starting index of the next valid ACL block.
+ * - KSZ9477_ACL_MAX_ENTRIES if no other valid blocks are found after 'start'.
+ * - A negative error code if an error occurs while checking.
+ */
+static int ksz9477_get_next_block_start(struct ksz_device *dev, int port,
+ int start)
+{
+ int block_size;
+
+ for (int i = start; i < KSZ9477_ACL_MAX_ENTRIES;) {
+ block_size = ksz9477_acl_get_cont_entr(dev, port, i);
+ if (block_size < 0 && block_size != -ENOTEMPTY)
+ return block_size;
+
+ if (block_size > 0)
+ return i;
+
+ i++;
+ }
+ return KSZ9477_ACL_MAX_ENTRIES;
+}
+
+/**
+ * ksz9477_swap_acl_blocks - Swap two ACL blocks
+ * @dev: Pointer to the device structure.
+ * @port: The port number on which the ACL blocks are to be swapped.
+ * @i: The starting index of the first ACL block.
+ * @j: The starting index of the second ACL block.
+ *
+ * This function is used to swap two ACL blocks present at given indices. The
+ * main purpose is to aid in the sorting and reordering of ACL blocks based on
+ * certain criteria, e.g., priority. It checks the validity of the block at
+ * index 'i', ensuring it's not an empty block, and then proceeds to swap it
+ * with the block at index 'j'.
+ *
+ * Returns:
+ * - 0 on successful swapping of blocks.
+ * - -EINVAL if the block at index 'i' is empty.
+ * - A negative error code if any other error occurs during the swap.
+ */
+static int ksz9477_swap_acl_blocks(struct ksz_device *dev, int port, int i,
+ int j)
+{
+ int ret, current_block_size;
+
+ current_block_size = ksz9477_acl_get_cont_entr(dev, port, i);
+ if (current_block_size < 0)
+ return current_block_size;
+
+ if (!current_block_size) {
+ dev_err(dev->dev, "ACL: swapping empty entry %d\n", i);
+ return -EINVAL;
+ }
+
+ ret = ksz9477_acl_move_entries(dev, port, i, j);
+ if (ret)
+ return ret;
+
+ ret = ksz9477_acl_move_entries(dev, port, j - current_block_size, i);
+ if (ret)
+ return ret;
+
+ return 0;
+}
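Worked example (illustrative indices): let block A start at i = 0 with two
entries and block B start at j = 4 with one entry. The first move places A
behind B, which leaves B starting at j - current_block_size = 2; the second
move then relocates B from index 2 to i = 0, completing the swap.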
+
+/**
+ * ksz9477_sort_acl_entr_no_back - Sort ACL entries for a given port based on
+ * priority without backing up entries.
+ * @dev: Pointer to the device structure.
+ * @port: The port number whose ACL entries need to be sorted.
+ *
+ * This function sorts ACL entries of the specified port using a variant of the
+ * bubble sort algorithm. It operates on blocks of ACL entries rather than
+ * individual entries. Each block's starting point is identified and then
+ * compared with subsequent blocks based on their priority. If the current
+ * block has a lower priority than the subsequent block, the two blocks are
+ * swapped.
+ *
+ * This is done in order to maintain an organized order of ACL entries based on
+ * priority, ensuring efficient and predictable ACL rule application.
+ *
+ * Returns:
+ * - 0 on successful sorting of entries.
+ * - A negative error code if any issue arises during sorting, e.g.,
+ * if the function is unable to get the next block start.
+ */
+static int ksz9477_sort_acl_entr_no_back(struct ksz_device *dev, int port)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ struct ksz9477_acl_entry *curr, *next;
+ int i, j, ret;
+
+ /* Bubble sort */
+ for (i = 0; i < KSZ9477_ACL_MAX_ENTRIES;) {
+ curr = &acles->entries[i];
+
+ j = ksz9477_get_next_block_start(dev, port, i + 1);
+ if (j < 0)
+ return j;
+
+ while (j < KSZ9477_ACL_MAX_ENTRIES) {
+ next = &acles->entries[j];
+
+ if (curr->prio > next->prio) {
+ ret = ksz9477_swap_acl_blocks(dev, port, i, j);
+ if (ret)
+ return ret;
+ }
+
+ j = ksz9477_get_next_block_start(dev, port, j + 1);
+ if (j < 0)
+ return j;
+ }
+
+ i = ksz9477_get_next_block_start(dev, port, i + 1);
+ if (i < 0)
+ return i;
+ }
+
+ return 0;
+}
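Note the invariant this pass maintains: after ksz9477_swap_acl_blocks(), the
slot at index i holds the block that was at j, and curr still points at
entries[i], so by the time the inner loop finishes, position i carries the
lowest remaining priority and the outer loop advances to the next block. For
example (assumed priorities), blocks with prio 7, 3 and 5 end up ordered
3, 5, 7 once the passes complete.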
+
+/**
+ * ksz9477_sort_acl_entries - Sort the ACL entries for a given port.
+ * @dev: Pointer to the KSZ device.
+ * @port: Port number.
+ *
+ * This function sorts the Access Control List (ACL) entries for a specified
+ * port. Before sorting, a backup of the original entries is created. If the
+ * sorting process fails, the function will log error messages displaying both
+ * the original and attempted sorted entries, and then restore the original
+ * entries from the backup.
+ *
+ * Return: 0 if the sorting succeeds, otherwise a negative error code.
+ */
+int ksz9477_sort_acl_entries(struct ksz_device *dev, int port)
+{
+ struct ksz9477_acl_entry backup[KSZ9477_ACL_MAX_ENTRIES];
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ int ret;
+
+ /* create a backup of the ACL entries, if something goes wrong
+ * we can restore the ACL entries.
+ */
+ memcpy(backup, acles->entries, sizeof(backup));
+
+ ret = ksz9477_sort_acl_entr_no_back(dev, port);
+ if (ret) {
+ dev_err(dev->dev, "ACL: failed to sort entries for port %d\n",
+ port);
+ dev_err(dev->dev, "ACL dump before sorting:\n");
+ ksz9477_dump_acl(dev, backup);
+ dev_err(dev->dev, "ACL dump after sorting:\n");
+ ksz9477_dump_acl(dev, acles->entries);
+ /* Restore the original entries */
+ memcpy(acles->entries, backup, sizeof(backup));
+ }
+
+ return ret;
+}
+
+/**
+ * ksz9477_acl_wait_ready - Waits for the ACL operation to complete on a given
+ * port.
+ * @dev: The ksz_device instance.
+ * @port: The port number to wait for.
+ *
+ * This function checks if the ACL write or read operation is completed by
+ * polling the specified register.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_wait_ready(struct ksz_device *dev, int port)
+{
+ unsigned int wr_mask = KSZ9477_ACL_WRITE_DONE | KSZ9477_ACL_READ_DONE;
+ unsigned int val, reg;
+ int ret;
+
+ reg = dev->dev_ops->get_port_addr(port, KSZ9477_PORT_ACL_CTRL_0);
+
+ ret = regmap_read_poll_timeout(dev->regmap[0], reg, val,
+ (val & wr_mask) == wr_mask, 1000, 10000);
+ if (ret)
+ dev_err(dev->dev, "Failed to read/write ACL table\n");
+
+ return ret;
+}
+
+/**
+ * ksz9477_acl_entry_write - Writes an ACL entry to a given port at the
+ * specified index.
+ * @dev: The ksz_device instance.
+ * @port: The port number to write the ACL entry to.
+ * @entry: A pointer to the ACL entry data.
+ * @idx: The index at which to write the ACL entry.
+ *
+ * This function writes the provided ACL entry to the specified port at the
+ * given index.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_entry_write(struct ksz_device *dev, int port, u8 *entry,
+ int idx)
+{
+ int ret, i;
+ u8 val;
+
+ for (i = 0; i < KSZ9477_ACL_ENTRY_SIZE; i++) {
+ ret = ksz_pwrite8(dev, port, KSZ9477_PORT_ACL_0 + i, entry[i]);
+ if (ret) {
+ dev_err(dev->dev, "Failed to write ACL entry %d\n", i);
+ return ret;
+ }
+ }
+
+ /* write everything down */
+ val = FIELD_PREP(KSZ9477_ACL_INDEX_M, idx) | KSZ9477_ACL_WRITE;
+ ret = ksz_pwrite8(dev, port, KSZ9477_PORT_ACL_CTRL_0, val);
+ if (ret)
+ return ret;
+
+ /* wait until everything is written */
+ return ksz9477_acl_wait_ready(dev, port);
+}
+
+/**
+ * ksz9477_acl_port_enable - Enables ACL functionality on a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to enable ACL functionality.
+ *
+ * This function enables ACL functionality on the specified port by configuring
+ * the appropriate control registers. It returns 0 if the operation is
+ * successful, or a negative error code if an error occurs.
+ *
+ * 0xn801 - KSZ9477S 5.2.8.2 Port Priority Control Register
+ * Bit 7 - Highest Priority
+ * Bit 6 - OR'ed Priority
+ * Bit 4 - MAC Address Priority Classification
+ * Bit 3 - VLAN Priority Classification
+ * Bit 2 - 802.1p Priority Classification
+ * Bit 1 - Diffserv Priority Classification
+ * Bit 0 - ACL Priority Classification
+ *
+ * Current driver implementation sets 802.1p priority classification by default.
+ * In this function we add ACL priority classification with OR'ed priority.
+ * According to testing, priority set by ACL will supersede the 802.1p priority.
+ *
+ * 0xn803 - KSZ9477S 5.2.8.4 Port Authentication Control Register
+ * Bit 2 - Access Control List (ACL) Enable
+ * Bits 1:0 - Authentication Mode
+ * 00 = Reserved
+ * 01 = Block Mode. Authentication is enabled. When ACL is
+ * enabled, all traffic that misses the ACL rules is
+ * blocked; otherwise ACL actions apply.
+ * 10 = Pass Mode. Authentication is disabled. When ACL is
+ * enabled, all traffic that misses the ACL rules is
+ * forwarded; otherwise ACL actions apply.
+ * 11 = Trap Mode. Authentication is enabled. All traffic is
+ * forwarded to the host port. When ACL is enabled, all
+ * traffic that misses the ACL rules is blocked; otherwise
+ * ACL actions apply.
+ *
+ * We are using Pass Mode in this function.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_port_enable(struct ksz_device *dev, int port)
+{
+ int ret;
+
+ ret = ksz_prmw8(dev, port, P_PRIO_CTRL, 0, PORT_ACL_PRIO_ENABLE |
+ PORT_OR_PRIO);
+ if (ret)
+ return ret;
+
+ return ksz_pwrite8(dev, port, REG_PORT_MRI_AUTHEN_CTRL,
+ PORT_ACL_ENABLE |
+ FIELD_PREP(PORT_AUTHEN_MODE, PORT_AUTHEN_PASS));
+}
+
+/**
+ * ksz9477_acl_port_disable - Disables ACL functionality on a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to disable ACL functionality.
+ *
+ * This function disables ACL functionality on the specified port by writing a
+ * value of 0 to the REG_PORT_MRI_AUTHEN_CTRL control register and removing
+ * the PORT_ACL_PRIO_ENABLE bit from the P_PRIO_CTRL register.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_port_disable(struct ksz_device *dev, int port)
+{
+ int ret;
+
+ ret = ksz_prmw8(dev, port, P_PRIO_CTRL, PORT_ACL_PRIO_ENABLE, 0);
+ if (ret)
+ return ret;
+
+ return ksz_pwrite8(dev, port, REG_PORT_MRI_AUTHEN_CTRL, 0);
+}
+
+/**
+ * ksz9477_acl_write_list - Write a list of ACL entries to a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to write ACL entries.
+ *
+ * This function enables ACL functionality on the specified port, writes a list
+ * of ACL entries to the port, and disables ACL functionality if there are no
+ * entries.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+int ksz9477_acl_write_list(struct ksz_device *dev, int port)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ int ret, i;
+
+ /* ACL should be enabled before writing entries */
+ ret = ksz9477_acl_port_enable(dev, port);
+ if (ret)
+ return ret;
+
+ /* write all entries */
+ for (i = 0; i < ARRAY_SIZE(acles->entries); i++) {
+ u8 *entry = acles->entries[i].entry;
+
+ /* Check if entry was removed and should be zeroed.
+		 * If the last fields of the entry are not zero, it means it
+		 * was removed locally but is not yet synced with the HW.
+ * So, we will write it down to the HW to remove it.
+ */
+ if (i >= acles->entries_count &&
+ entry[KSZ9477_ACL_PORT_ACCESS_10] == 0 &&
+ entry[KSZ9477_ACL_PORT_ACCESS_11] == 0)
+ continue;
+
+ ret = ksz9477_acl_entry_write(dev, port, entry, i);
+ if (ret)
+ return ret;
+
+		/* now the removed entry is clean on the HW side, so it can
+		 * be cleaned in the cache too
+ */
+ if (i >= acles->entries_count &&
+ entry[KSZ9477_ACL_PORT_ACCESS_10] != 0 &&
+ entry[KSZ9477_ACL_PORT_ACCESS_11] != 0) {
+ entry[KSZ9477_ACL_PORT_ACCESS_10] = 0;
+ entry[KSZ9477_ACL_PORT_ACCESS_11] = 0;
+ }
+ }
+
+ if (!acles->entries_count)
+ return ksz9477_acl_port_disable(dev, port);
+
+ return 0;
+}
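
For illustration, here is a minimal user-space sketch (assumptions: plain C,
simplified sizes; this is not driver code) of how the two access bytes double
as a dirty flag for entries removed locally but not yet zeroed in hardware:

	#include <stdio.h>
	#include <string.h>

	#define N_ENTRIES 16
	#define ACC_10 0x10	/* byte-enable MSB slot in the shadow entry */
	#define ACC_11 0x11	/* byte-enable LSB slot in the shadow entry */

	struct model_entry { unsigned char e[0x12]; };

	/* Slots past the active count with zeroed access bytes are already
	 * clean in HW and can be skipped on the next full write.
	 */
	static int needs_hw_write(const struct model_entry *me, int i, int count)
	{
		if (i >= count && !me->e[ACC_10] && !me->e[ACC_11])
			return 0;
		return 1;
	}

	int main(void)
	{
		struct model_entry tab[N_ENTRIES] = { 0 };
		int count = 2, i;

		/* entry 2 was just removed: mark all its bytes for writing */
		tab[2].e[ACC_10] = 0xff;
		tab[2].e[ACC_11] = 0xff;

		for (i = 0; i < N_ENTRIES; i++)
			if (needs_hw_write(&tab[i], i, count))
				printf("write slot %d to HW\n", i); /* 0, 1, 2 */
		return 0;
	}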
+
+/**
+ * ksz9477_acl_remove_entries - Remove ACL entries with a given cookie from a
+ * specified ksz9477_acl_entries structure.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to remove ACL entries.
+ * @acles: The ksz9477_acl_entries instance.
+ * @cookie: The cookie value to match for entry removal.
+ *
+ * This function iterates through the entries array, removing any entries with
+ * a matching cookie value. The remaining entries are then shifted down to fill
+ * the gap.
+ */
+void ksz9477_acl_remove_entries(struct ksz_device *dev, int port,
+ struct ksz9477_acl_entries *acles,
+ unsigned long cookie)
+{
+ int entries_count = acles->entries_count;
+ int ret, i, src_count;
+ int src_idx = -1;
+
+ if (!entries_count)
+ return;
+
+ /* Search for the first position with the cookie */
+ for (i = 0; i < entries_count; i++) {
+ if (acles->entries[i].cookie == cookie) {
+ src_idx = i;
+ break;
+ }
+ }
+
+ /* No entries with the matching cookie found */
+ if (src_idx == -1)
+ return;
+
+ /* Get the size of the cookie entry. We may have complex entries. */
+ src_count = ksz9477_acl_get_cont_entr(dev, port, src_idx);
+ if (src_count <= 0)
+ return;
+
+ /* Move all entries down to overwrite removed entry with the cookie */
+ ret = ksz9477_move_entries_downwards(dev, acles, src_idx,
+ src_count,
+ entries_count - src_count);
+ if (ret) {
+ dev_err(dev->dev, "Failed to move ACL entries down\n");
+ return;
+ }
+
+ /* Zero out the freed slots at the end of the list to avoid
+ * unexpected behavior or untested hardware quirks.
+ */
+ for (i = entries_count - src_count; i < entries_count; i++) {
+ struct ksz9477_acl_entry *entry = &acles->entries[i];
+
+ memset(entry, 0, sizeof(*entry));
+
+ /* Set all access bits to be able to write zeroed entry to HW */
+ entry->entry[KSZ9477_ACL_PORT_ACCESS_10] = 0xff;
+ entry->entry[KSZ9477_ACL_PORT_ACCESS_11] = 0xff;
+ }
+
+ /* Adjust the total entries count */
+ acles->entries_count -= src_count;
+}
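
As a sketch of this shift-down removal under simplifying assumptions
(single-slot entries in a plain array; the real code also handles multi-slot
entries via ksz9477_acl_get_cont_entr()):

	#include <stdio.h>
	#include <string.h>

	struct m { unsigned long cookie; int data; };

	static int remove_by_cookie(struct m *a, int count, unsigned long cookie)
	{
		int i, src = -1;

		for (i = 0; i < count; i++) {
			if (a[i].cookie == cookie) {
				src = i;
				break;
			}
		}
		if (src < 0)
			return count;

		/* shift the tail down over the removed slot */
		memmove(&a[src], &a[src + 1], (count - src - 1) * sizeof(*a));
		/* zero the freed slot at the end of the list */
		memset(&a[count - 1], 0, sizeof(*a));
		return count - 1;
	}

	int main(void)
	{
		struct m a[4] = { { 1, 10 }, { 2, 20 }, { 3, 30 } };
		int count = remove_by_cookie(a, 3, 2);

		printf("count=%d a[1].data=%d\n", count, a[1].data); /* 2, 30 */
		return 0;
	}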
+
+/**
+ * ksz9477_port_acl_init - Initialize the ACL for a specified port on a ksz
+ * device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to initialize the ACL for.
+ *
+ * This function allocates memory for an acl structure, associates it with the
+ * specified port, and initializes the ACL entries to a default state. The
+ * entries are then written using the ksz9477_acl_write_list function, ensuring
+ * the ACL has a predictable initial hardware state.
+ *
+ * Returns: 0 on success, or an error code on failure.
+ */
+int ksz9477_port_acl_init(struct ksz_device *dev, int port)
+{
+ struct ksz9477_acl_entries *acles;
+ struct ksz9477_acl_priv *acl;
+ int ret, i;
+
+ acl = kzalloc(sizeof(*acl), GFP_KERNEL);
+ if (!acl)
+ return -ENOMEM;
+
+ dev->ports[port].acl_priv = acl;
+
+ acles = &acl->acles;
+ /* write all entries */
+ for (i = 0; i < ARRAY_SIZE(acles->entries); i++) {
+ u8 *entry = acles->entries[i].entry;
+
+ /* Set all access bits to be able to write zeroed
+ * entry
+ */
+ entry[KSZ9477_ACL_PORT_ACCESS_10] = 0xff;
+ entry[KSZ9477_ACL_PORT_ACCESS_11] = 0xff;
+ }
+
+ ret = ksz9477_acl_write_list(dev, port);
+ if (ret)
+ goto free_acl;
+
+ return 0;
+
+free_acl:
+ kfree(dev->ports[port].acl_priv);
+ dev->ports[port].acl_priv = NULL;
+
+ return ret;
+}
+
+/**
+ * ksz9477_port_acl_free - Free the ACL resources for a specified port on a ksz
+ * device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to free the ACL for.
+ *
+ * This function disables the ACL on the specified port and frees the
+ * associated memory.
+ */
+void ksz9477_port_acl_free(struct ksz_device *dev, int port)
+{
+ if (!dev->ports[port].acl_priv)
+ return;
+
+ ksz9477_acl_port_disable(dev, port);
+
+ kfree(dev->ports[port].acl_priv);
+ dev->ports[port].acl_priv = NULL;
+}
+
+/**
+ * ksz9477_acl_set_reg - Set entry[0x10] and entry[0x11] depending on the
+ * updated entry[]
+ * @entry: An array containing the entries
+ * @reg: The register of the entry that needs to be updated
+ * @value: The value to be assigned to the updated entry
+ *
+ * This function updates the entry[] array based on the provided register and
+ * value. It also sets entry[0x10] and entry[0x11] according to the ACL byte
+ * enable rules.
+ *
+ * 0x10 - Byte Enable [15:8]
+ *
+ * Each bit enables accessing one of the ACL bytes when a read or write is
+ * initiated by writing to the Port ACL Byte Enable LSB Register.
+ * Bit 0 applies to the Port ACL Access 7 Register
+ * Bit 1 applies to the Port ACL Access 6 Register, etc.
+ * Bit 7 applies to the Port ACL Access 0 Register
+ * 1 = Byte is selected for read/write
+ * 0 = Byte is not selected
+ *
+ * 0x11 - Byte Enable [7:0]
+ *
+ * Each bit enables accessing one of the ACL bytes when a read or write is
+ * initiated by writing to the Port ACL Byte Enable LSB Register.
+ * Bit 0 applies to the Port ACL Access F Register
+ * Bit 1 applies to the Port ACL Access E Register, etc.
+ * Bit 7 applies to the Port ACL Access 8 Register
+ * 1 = Byte is selected for read/write
+ * 0 = Byte is not selected
+ */
+static void ksz9477_acl_set_reg(u8 *entry, enum ksz9477_acl_port_access reg,
+ u8 value)
+{
+ if (reg >= KSZ9477_ACL_PORT_ACCESS_0 &&
+ reg <= KSZ9477_ACL_PORT_ACCESS_7) {
+ entry[KSZ9477_ACL_PORT_ACCESS_10] |=
+ BIT(KSZ9477_ACL_PORT_ACCESS_7 - reg);
+ } else if (reg >= KSZ9477_ACL_PORT_ACCESS_8 &&
+ reg <= KSZ9477_ACL_PORT_ACCESS_F) {
+ entry[KSZ9477_ACL_PORT_ACCESS_11] |=
+ BIT(KSZ9477_ACL_PORT_ACCESS_F - reg);
+ } else {
+ WARN_ON(1);
+ return;
+ }
+
+ entry[reg] = value;
+}
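
The byte-enable placement can be checked with a stand-alone snippet (plain C,
mirroring only the bit math of ksz9477_acl_set_reg(), not the register I/O):

	#include <stdio.h>

	int main(void)
	{
		unsigned char en10 = 0, en11 = 0;
		int reg;

		for (reg = 0; reg <= 0xF; reg++) {
			if (reg <= 7)
				en10 |= 1u << (7 - reg);  /* regs 0..7 -> 0x10 */
			else
				en11 |= 1u << (0xF - reg); /* regs 8..F -> 0x11 */
		}
		/* touching every byte selects every byte: both masks are 0xff */
		printf("0x10=%02x 0x11=%02x\n", en10, en11);
		return 0;
	}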
+
+/**
+ * ksz9477_acl_matching_rule_cfg_l2 - Configure an ACL filtering entry to match
+ * L2 types of Ethernet frames
+ * @entry: Pointer to ACL entry buffer
+ * @ethertype: Ethertype value
+ * @eth_addr: Pointer to Ethernet address
+ * @is_src: If true, match the source MAC address; if false, match the
+ * destination MAC address
+ *
+ * This function configures an Access Control List (ACL) filtering
+ * entry to match Layer 2 types of Ethernet frames based on the provided
+ * ethertype and Ethernet address. Additionally, it can match either the source
+ * or destination MAC address depending on the value of the is_src parameter.
+ *
+ * Register Descriptions for MD = 01 and ENB != 00 (Layer 2 MAC header
+ * filtering)
+ *
+ * 0x01 - Mode and Enable
+ * Bits 5:4 - MD (Mode)
+ * 01 = Layer 2 MAC header or counter filtering
+ * Bits 3:2 - ENB (Enable)
+ * 01 = Comparison is performed only on the TYPE value
+ * 10 = Comparison is performed only on the MAC Address value
+ * 11 = Both the MAC Address and TYPE are tested
+ * Bit 1 - S/D (Source / Destination)
+ * 0 = Destination address
+ * 1 = Source address
+ * Bit 0 - EQ (Equal / Not Equal)
+ * 0 = Not Equal produces true result
+ * 1 = Equal produces true result
+ *
+ * 0x02-0x07 - MAC Address
+ * 0x02 - MAC Address [47:40]
+ * 0x03 - MAC Address [39:32]
+ * 0x04 - MAC Address [31:24]
+ * 0x05 - MAC Address [23:16]
+ * 0x06 - MAC Address [15:8]
+ * 0x07 - MAC Address [7:0]
+ *
+ * 0x08-0x09 - EtherType
+ * 0x08 - EtherType [15:8]
+ * 0x09 - EtherType [7:0]
+ */
+static void ksz9477_acl_matching_rule_cfg_l2(u8 *entry, u16 ethertype,
+ u8 *eth_addr, bool is_src)
+{
+ u8 enb = 0;
+ u8 val;
+
+ if (ethertype)
+ enb |= KSZ9477_ACL_ENB_L2_TYPE;
+ if (eth_addr)
+ enb |= KSZ9477_ACL_ENB_L2_MAC;
+
+ val = FIELD_PREP(KSZ9477_ACL_MD_MASK, KSZ9477_ACL_MD_L2_MAC) |
+ FIELD_PREP(KSZ9477_ACL_ENB_MASK, enb) |
+ FIELD_PREP(KSZ9477_ACL_SD_SRC, is_src) | KSZ9477_ACL_EQ_EQUAL;
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_1, val);
+
+ if (eth_addr) {
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ ksz9477_acl_set_reg(entry,
+ KSZ9477_ACL_PORT_ACCESS_2 + i,
+ eth_addr[i]);
+ }
+ }
+
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_8, ethertype >> 8);
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_9, ethertype & 0xff);
+}
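
To make the mode byte concrete, a hedged sketch that packs MD/ENB/S-D/EQ per
the register description above (the numeric field values below are taken from
that description, not from the driver headers):

	#include <stdio.h>

	#define MD_L2_MAC	0x1	/* MD = 01: L2 MAC header filtering */
	#define ENB_TYPE	0x1	/* ENB bit 0: compare the TYPE value */
	#define ENB_MAC		0x2	/* ENB bit 1: compare the MAC address */

	int main(void)
	{
		unsigned char md = MD_L2_MAC, enb = ENB_TYPE | ENB_MAC;
		unsigned char is_src = 1, eq = 1;
		/* MD in bits 5:4, ENB in bits 3:2, S/D bit 1, EQ bit 0 */
		unsigned char val = (md << 4) | (enb << 2) | (is_src << 1) | eq;

		printf("mode byte 0x01 = 0x%02x\n", val); /* 0x1f */
		return 0;
	}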
+
+/**
+ * ksz9477_acl_action_rule_cfg - Set action for an ACL entry
+ * @entry: Pointer to the ACL entry
+ * @force_prio: If true, force the priority value
+ * @prio_val: Priority value
+ *
+ * This function sets the action for the specified ACL entry. It prepares
+ * the priority mode and traffic class values and updates the entry's
+ * action registers accordingly. Currently, there is no port or VLAN PCP
+ * remapping.
+ *
+ * ACL Action Rule Parameters for Non-Count Modes (MD ≠ 01 or ENB ≠ 00)
+ *
+ * 0x0A - PM, P, RPE, RP[2:1]
+ * Bits 7:6 - PM[1:0] - Priority Mode
+ * 00 = ACL does not specify the packet priority. Priority is
+ * determined by standard QoS functions.
+ * 01 = Change packet priority to P[2:0] if it is greater than QoS
+ * result.
+ * 10 = Change packet priority to P[2:0] if it is smaller than the
+ * QoS result.
+ * 11 = Always change packet priority to P[2:0].
+ * Bits 5:3 - P[2:0] - Priority value
+ * Bit 2 - RPE - Remark Priority Enable
+ * 0 = Disable priority remarking
+ * 1 = Enable priority remarking. VLAN tag priority (PCP) bits are
+ * replaced by RP[2:0].
+ * Bits 1:0 - RP[2:1] - Remarked Priority value (bits 2:1)
+ *
+ * 0x0B - RP[0], MM
+ * Bit 7 - RP[0] - Remarked Priority value (bit 0)
+ * Bits 6:5 - MM[1:0] - Map Mode
+ * 00 = No forwarding remapping
+ * 01 = The forwarding map in FORWARD is OR'ed with the forwarding
+ * map from the Address Lookup Table.
+ * 10 = The forwarding map in FORWARD is AND'ed with the forwarding
+ * map from the Address Lookup Table.
+ * 11 = The forwarding map in FORWARD replaces the forwarding map
+ * from the Address Lookup Table.
+ * 0x0D - FORWARD[n:0]
+ * Bits 7:0 - FORWARD[n:0] - Forwarding map. Bit 0 = port 1,
+ * bit 1 = port 2, etc.
+ * 1 = enable forwarding to this port
+ * 0 = do not forward to this port
+ */
+void ksz9477_acl_action_rule_cfg(u8 *entry, bool force_prio, u8 prio_val)
+{
+ u8 prio_mode, val;
+
+ if (force_prio)
+ prio_mode = KSZ9477_ACL_PM_REPLACE;
+ else
+ prio_mode = KSZ9477_ACL_PM_DISABLE;
+
+ val = FIELD_PREP(KSZ9477_ACL_PM_M, prio_mode) |
+ FIELD_PREP(KSZ9477_ACL_P_M, prio_val);
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_A, val);
+
+ /* no port or VLAN PCP remapping for now */
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_B, 0);
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_D, 0);
+}
+
+/**
+ * ksz9477_acl_processing_rule_set_action - Set the action for the processing
+ * rule set.
+ * @entry: Pointer to the ACL entry
+ * @action_idx: Index of the action to be applied
+ *
+ * This function sets the action for the processing rule set by updating the
+ * appropriate register in the entry. There can be only one action per
+ * processing rule.
+ *
+ * Access Control List (ACL) Processing Rule Registers:
+ *
+ * 0x00 - First Rule Number (FRN)
+ * Bits 3:0 - First Rule Number. Pointer to an Action rule entry.
+ */
+void ksz9477_acl_processing_rule_set_action(u8 *entry, u8 action_idx)
+{
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_0, action_idx);
+}
+
+/**
+ * ksz9477_acl_processing_rule_add_match - Add a matching rule to the rule set
+ * @entry: Pointer to the ACL entry
+ * @match_idx: Index of the matching rule to be added
+ *
+ * This function adds a matching rule to the rule set by updating the
+ * appropriate bits in the entry's rule set registers.
+ *
+ * Access Control List (ACL) Processing Rule Registers:
+ *
+ * 0x0E - RuleSet [15:8]
+ * Bits 7:0 - RuleSet [15:8] Specifies a set of one or more Matching rule
+ * entries. RuleSet has one bit for each of the 16 Matching rule entries.
+ * If multiple Matching rules are selected, then all conditions will be
+ * AND'ed to produce a final match result.
+ * 0 = Matching rule not selected
+ * 1 = Matching rule selected
+ *
+ * 0x0F - RuleSet [7:0]
+ * Bits 7:0 - RuleSet [7:0]
+ */
+static void ksz9477_acl_processing_rule_add_match(u8 *entry, u8 match_idx)
+{
+ u8 vale = entry[KSZ9477_ACL_PORT_ACCESS_E];
+ u8 valf = entry[KSZ9477_ACL_PORT_ACCESS_F];
+
+ if (match_idx < 8)
+ valf |= BIT(match_idx);
+ else
+ vale |= BIT(match_idx - 8);
+
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_E, vale);
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_F, valf);
+}
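
A quick stand-alone model of the RuleSet split across registers 0x0E/0x0F,
mirroring the helper above:

	#include <stdio.h>

	int main(void)
	{
		unsigned char reg_e = 0, reg_f = 0; /* RuleSet[15:8], [7:0] */
		int idx[] = { 0, 3, 9 };
		int i;

		for (i = 0; i < 3; i++) {
			if (idx[i] < 8)
				reg_f |= 1u << idx[i];
			else
				reg_e |= 1u << (idx[i] - 8);
		}
		printf("0x0E=%02x 0x0F=%02x\n", reg_e, reg_f); /* 02, 09 */
		return 0;
	}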
+
+/**
+ * ksz9477_acl_get_init_entry - Get a new uninitialized entry for a specified
+ * port on a ksz_device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to get the uninitialized entry for.
+ * @cookie: The cookie to associate with the entry.
+ * @prio: The priority to associate with the entry.
+ *
+ * This function retrieves the next available ACL entry for the specified port,
+ * clears all access flags, and associates it with the current cookie.
+ *
+ * Returns: A pointer to the new uninitialized ACL entry.
+ */
+static struct ksz9477_acl_entry *
+ksz9477_acl_get_init_entry(struct ksz_device *dev, int port,
+ unsigned long cookie, u32 prio)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ struct ksz9477_acl_entry *entry;
+
+ entry = &acles->entries[acles->entries_count];
+ entry->cookie = cookie;
+ entry->prio = prio;
+
+ /* clear all access flags */
+ entry->entry[KSZ9477_ACL_PORT_ACCESS_10] = 0;
+ entry->entry[KSZ9477_ACL_PORT_ACCESS_11] = 0;
+
+ return entry;
+}
+
+/**
+ * ksz9477_acl_match_process_l2 - Configure Layer 2 ACL matching rules and
+ * processing rules.
+ * @dev: Pointer to the ksz_device.
+ * @port: Port number.
+ * @ethtype: Ethernet type.
+ * @src_mac: Source MAC address.
+ * @dst_mac: Destination MAC address.
+ * @cookie: The cookie to associate with the entry.
+ * @prio: The priority of the entry.
+ *
+ * This function sets up matching and processing rules for Layer 2 ACLs.
+ * It takes into account that only one MAC per entry is supported.
+ */
+void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port,
+ u16 ethtype, u8 *src_mac, u8 *dst_mac,
+ unsigned long cookie, u32 prio)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ struct ksz9477_acl_entry *entry;
+
+ entry = ksz9477_acl_get_init_entry(dev, port, cookie, prio);
+
+ /* ACL supports only one MAC per entry */
+ if (src_mac && dst_mac) {
+ ksz9477_acl_matching_rule_cfg_l2(entry->entry, ethtype, src_mac,
+ true);
+
+ /* Add both match entries to first processing rule */
+ ksz9477_acl_processing_rule_add_match(entry->entry,
+ acles->entries_count);
+ acles->entries_count++;
+ ksz9477_acl_processing_rule_add_match(entry->entry,
+ acles->entries_count);
+
+ entry = ksz9477_acl_get_init_entry(dev, port, cookie, prio);
+ ksz9477_acl_matching_rule_cfg_l2(entry->entry, 0, dst_mac,
+ false);
+ acles->entries_count++;
+ } else {
+ u8 *mac = src_mac ? src_mac : dst_mac;
+ bool is_src = src_mac ? true : false;
+
+ ksz9477_acl_matching_rule_cfg_l2(entry->entry, ethtype, mac,
+ is_src);
+ ksz9477_acl_processing_rule_add_match(entry->entry,
+ acles->entries_count);
+ acles->entries_count++;
+ }
+}
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index cba3dba58bc3..f3a205ee483f 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -112,19 +112,6 @@
#define REG_SW_IBA_SYNC__1 0x010C
-#define REG_SW_IO_STRENGTH__1 0x010D
-#define SW_DRIVE_STRENGTH_M 0x7
-#define SW_DRIVE_STRENGTH_2MA 0
-#define SW_DRIVE_STRENGTH_4MA 1
-#define SW_DRIVE_STRENGTH_8MA 2
-#define SW_DRIVE_STRENGTH_12MA 3
-#define SW_DRIVE_STRENGTH_16MA 4
-#define SW_DRIVE_STRENGTH_20MA 5
-#define SW_DRIVE_STRENGTH_24MA 6
-#define SW_DRIVE_STRENGTH_28MA 7
-#define SW_HI_SPEED_DRIVE_STRENGTH_S 4
-#define SW_LO_SPEED_DRIVE_STRENGTH_S 0
-
#define REG_SW_IBA_STATUS__4 0x0110
#define SW_IBA_REQ BIT(31)
@@ -166,13 +153,6 @@
#define SW_DOUBLE_TAG BIT(7)
#define SW_RESET BIT(1)
-#define REG_SW_MAC_ADDR_0 0x0302
-#define REG_SW_MAC_ADDR_1 0x0303
-#define REG_SW_MAC_ADDR_2 0x0304
-#define REG_SW_MAC_ADDR_3 0x0305
-#define REG_SW_MAC_ADDR_4 0x0306
-#define REG_SW_MAC_ADDR_5 0x0307
-
#define REG_SW_MTU__2 0x0308
#define REG_SW_MTU_MASK GENMASK(13, 0)
diff --git a/drivers/net/dsa/microchip/ksz9477_tc_flower.c b/drivers/net/dsa/microchip/ksz9477_tc_flower.c
new file mode 100644
index 000000000000..8b2f5be667e0
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477_tc_flower.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+
+#include "ksz9477.h"
+#include "ksz9477_reg.h"
+#include "ksz_common.h"
+
+#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0)
+#define KSZ9477_MAX_TC 7
+
+/**
+ * ksz9477_flower_parse_key_l2 - Parse Layer 2 key from flow rule and configure
+ * ACL entries accordingly.
+ * @dev: Pointer to the ksz_device.
+ * @port: Port number.
+ * @extack: Pointer to the netlink_ext_ack.
+ * @rule: Pointer to the flow_rule.
+ * @cookie: The cookie to associate with the entry.
+ * @prio: The priority of the entry.
+ *
+ * This function parses the Layer 2 key from the flow rule and configures
+ * the corresponding ACL entries. It checks for unsupported offloads and
+ * available entries before proceeding with the configuration.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int ksz9477_flower_parse_key_l2(struct ksz_device *dev, int port,
+ struct netlink_ext_ack *extack,
+ struct flow_rule *rule,
+ unsigned long cookie, u32 prio)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct flow_match_eth_addrs ematch;
+ struct ksz9477_acl_entries *acles;
+ int required_entries;
+ u8 *src_mac = NULL;
+ u8 *dst_mac = NULL;
+ u16 ethtype = 0;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ if (match.key->n_proto) {
+ if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "ethernet type mask must be a full mask");
+ return -EINVAL;
+ }
+
+ ethtype = be16_to_cpu(match.key->n_proto);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ flow_rule_match_eth_addrs(rule, &ematch);
+
+ if (!is_zero_ether_addr(ematch.key->src)) {
+ if (!is_broadcast_ether_addr(ematch.mask->src))
+ goto not_full_mask_err;
+
+ src_mac = ematch.key->src;
+ }
+
+ if (!is_zero_ether_addr(ematch.key->dst)) {
+ if (!is_broadcast_ether_addr(ematch.mask->dst))
+ goto not_full_mask_err;
+
+ dst_mac = ematch.key->dst;
+ }
+ }
+
+ acles = &acl->acles;
+ /* ACL supports only one MAC per entry */
+ required_entries = src_mac && dst_mac ? 2 : 1;
+
+ /* Check if there are enough available entries */
+ if (acles->entries_count + required_entries > KSZ9477_ACL_MAX_ENTRIES) {
+ NL_SET_ERR_MSG_MOD(extack, "ACL entry limit reached");
+ return -EOPNOTSUPP;
+ }
+
+ ksz9477_acl_match_process_l2(dev, port, ethtype, src_mac, dst_mac,
+ cookie, prio);
+
+ return 0;
+
+not_full_mask_err:
+ NL_SET_ERR_MSG_MOD(extack, "MAC address mask must be a full mask");
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ksz9477_flower_parse_key - Parse flow rule keys for a specified port on a
+ * ksz_device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to parse the flow rule keys for.
+ * @extack: The netlink extended ACK for reporting errors.
+ * @rule: The flow_rule to parse.
+ * @cookie: The cookie to associate with the entry.
+ * @prio: The priority of the entry.
+ *
+ * This function checks if the used keys in the flow rule are supported by
+ * the device and parses the L2 keys if they match. If unsupported keys are
+ * used, an error message is set in the extended ACK.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int ksz9477_flower_parse_key(struct ksz_device *dev, int port,
+ struct netlink_ext_ack *extack,
+ struct flow_rule *rule,
+ unsigned long cookie, u32 prio)
+{
+ struct flow_dissector *dissector = rule->match.dissector;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ ret = ksz9477_flower_parse_key_l2(dev, port, extack, rule,
+ cookie, prio);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz9477_flower_parse_action - Parse flow rule actions for a specified port
+ * on a ksz_device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to parse the flow rule actions for.
+ * @extack: The netlink extended ACK for reporting errors.
+ * @cls: The flow_cls_offload instance containing the flow rule.
+ * @entry_idx: The index of the ACL entry to store the action.
+ *
+ * This function checks if the actions in the flow rule are supported by
+ * the device. Currently, only actions that change priorities are supported.
+ * If unsupported actions are encountered, an error message is set in the
+ * extended ACK.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int ksz9477_flower_parse_action(struct ksz_device *dev, int port,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ int entry_idx)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ const struct flow_action_entry *act;
+ struct ksz9477_acl_entry *entry;
+ bool prio_force = false;
+ u8 prio_val = 0;
+ int i;
+
+ if (TC_H_MIN(cls->classid)) {
+ NL_SET_ERR_MSG_MOD(extack, "hw_tc is not supported. Use: action skbedit prio");
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_PRIORITY:
+ if (act->priority > KSZ9477_MAX_TC) {
+ NL_SET_ERR_MSG_MOD(extack, "Priority value is too high");
+ return -EOPNOTSUPP;
+ }
+ prio_force = true;
+ prio_val = act->priority;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* pick entry to store action */
+ entry = &acl->acles.entries[entry_idx];
+
+ ksz9477_acl_action_rule_cfg(entry->entry, prio_force, prio_val);
+ ksz9477_acl_processing_rule_set_action(entry->entry, entry_idx);
+
+ return 0;
+}
+
+/**
+ * ksz9477_cls_flower_add - Add a flow classification rule for a specified port
+ * on a ksz_device.
+ * @ds: The DSA switch instance.
+ * @port: The port number to add the flow classification rule to.
+ * @cls: The flow_cls_offload instance containing the flow rule.
+ * @ingress: A flag indicating if the rule is applied on the ingress path.
+ *
+ * This function adds a flow classification rule for a specified port on a
+ * ksz_device. It checks if the ACL offloading is supported and parses the flow
+ * keys and actions. If the ACL is not supported, it returns an error. If there
+ * are unprocessed entries, it parses the action for the rule.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int ksz9477_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct ksz_device *dev = ds->priv;
+ struct ksz9477_acl_priv *acl;
+ int action_entry_idx;
+ int ret;
+
+ acl = dev->ports[port].acl_priv;
+
+ if (!acl) {
+ NL_SET_ERR_MSG_MOD(extack, "ACL offloading is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ /* A complex rule set can take multiple entries. Use the first entry
+ * to store the action.
+ */
+ action_entry_idx = acl->acles.entries_count;
+
+ ret = ksz9477_flower_parse_key(dev, port, extack, rule, cls->cookie,
+ cls->common.prio);
+ if (ret)
+ return ret;
+
+ ret = ksz9477_flower_parse_action(dev, port, extack, cls,
+ action_entry_idx);
+ if (ret)
+ return ret;
+
+ ret = ksz9477_sort_acl_entries(dev, port);
+ if (ret)
+ return ret;
+
+ return ksz9477_acl_write_list(dev, port);
+}
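
For context, a rule that this path can offload might be installed roughly as
follows (a usage sketch; the port name lan1 is an assumption):

	tc qdisc add dev lan1 clsact
	tc filter add dev lan1 ingress flower skip_sw \
		dst_mac 01:80:c2:00:00:0e action skbedit priority 7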
+
+/**
+ * ksz9477_cls_flower_del - Remove a flow classification rule for a specified
+ * port on a ksz_device.
+ * @ds: The DSA switch instance.
+ * @port: The port number to remove the flow classification rule from.
+ * @cls: The flow_cls_offload instance containing the flow rule.
+ * @ingress: A flag indicating if the rule is applied on the ingress path.
+ *
+ * This function removes a flow classification rule for a specified port on a
+ * ksz_device. It checks if the ACL is initialized, and if not, returns an
+ * error. If the ACL is initialized, it removes entries with the specified
+ * cookie and rewrites the ACL list.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int ksz9477_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ unsigned long cookie = cls->cookie;
+ struct ksz_device *dev = ds->priv;
+ struct ksz9477_acl_priv *acl;
+
+ acl = dev->ports[port].acl_priv;
+
+ if (!acl)
+ return -EOPNOTSUPP;
+
+ ksz9477_acl_remove_entries(dev, port, &acl->acles, cookie);
+
+ return ksz9477_acl_write_list(dev, port);
+}
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 42db7679c360..b800ace40ce1 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -16,6 +16,7 @@
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
+#include <linux/if_hsr.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
@@ -186,6 +187,72 @@ static const struct ksz_mib_names ksz9477_mib_names[] = {
{ 0x83, "tx_discards" },
};
+struct ksz_driver_strength_prop {
+ const char *name;
+ int offset;
+ int value;
+};
+
+enum ksz_driver_strength_type {
+ KSZ_DRIVER_STRENGTH_HI,
+ KSZ_DRIVER_STRENGTH_LO,
+ KSZ_DRIVER_STRENGTH_IO,
+};
+
+/**
+ * struct ksz_drive_strength - drive strength mapping
+ * @reg_val: register value
+ * @microamp: microamp value
+ */
+struct ksz_drive_strength {
+ u32 reg_val;
+ u32 microamp;
+};
+
+/* ksz9477_drive_strengths - Drive strength mapping for KSZ9477 variants
+ *
+ * These values are not documented in the KSZ9477 variant datasheets, but
+ * Microchip confirmed that KSZ9477, KSZ9567, KSZ8567, KSZ9897, KSZ9896,
+ * KSZ9563, KSZ9893 and KSZ8563 use the same drive strength register
+ * settings as the KSZ8795.
+ *
+ * The KSZ8795CLX documentation provides more information, with some
+ * recommendations:
+ * - for high speed signals
+ * 1. 4 mA or 8 mA is often used for the MII, RMII, and SPI interfaces with
+ * 2.5V or 3.3V VDDIO.
+ * 2. 12 mA or 16 mA is often used for the MII, RMII, and SPI interfaces
+ * with 1.8V VDDIO.
+ * 3. 20 mA or 24 mA is often used for the GMII/RGMII interface with 2.5V
+ * or 3.3V VDDIO.
+ * 4. 28 mA is often used for the GMII/RGMII interface with 1.8V VDDIO.
+ * 5. Within the same interface, heavier loading should use the higher of
+ * the available drive strengths.
+ * - for low speed signals
+ * 1. 3.3V VDDIO, use either 4 mA or 8 mA.
+ * 2. 2.5V VDDIO, use either 8 mA or 12 mA.
+ * 3. 1.8V VDDIO, use either 12 mA or 16 mA.
+ * 4. For heavy loading, a higher drive strength can be used.
+ */
+static const struct ksz_drive_strength ksz9477_drive_strengths[] = {
+ { SW_DRIVE_STRENGTH_2MA, 2000 },
+ { SW_DRIVE_STRENGTH_4MA, 4000 },
+ { SW_DRIVE_STRENGTH_8MA, 8000 },
+ { SW_DRIVE_STRENGTH_12MA, 12000 },
+ { SW_DRIVE_STRENGTH_16MA, 16000 },
+ { SW_DRIVE_STRENGTH_20MA, 20000 },
+ { SW_DRIVE_STRENGTH_24MA, 24000 },
+ { SW_DRIVE_STRENGTH_28MA, 28000 },
+};
+
+/* ksz8830_drive_strengths - Drive strength mapping for KSZ8830, KSZ8873, ..
+ * variants.
+ * These values are documented in the KSZ8873 and KSZ8863 datasheets.
+ */
+static const struct ksz_drive_strength ksz8830_drive_strengths[] = {
+ { 0, 8000 },
+ { KSZ8873_DRIVE_STRENGTH_16MA, 16000 },
+};
+
static const struct ksz_dev_ops ksz8_dev_ops = {
.setup = ksz8_setup,
.get_port_addr = ksz8_get_port_addr,
@@ -298,6 +365,7 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
};
static const u16 ksz8795_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x68,
[REG_IND_CTRL_0] = 0x6E,
[REG_IND_DATA_8] = 0x70,
[REG_IND_DATA_CHECK] = 0x72,
@@ -426,6 +494,7 @@ static u8 ksz8863_shifts[] = {
};
static const u16 ksz9477_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x0302,
[P_STP_CTRL] = 0x0B04,
[S_START_CTRL] = 0x0300,
[S_BROADCAST_CTRL] = 0x0332,
@@ -2498,8 +2567,7 @@ static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
return dev->dev_ops->mdb_del(dev, port, mdb, db);
}
-static int ksz_enable_port(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+static int ksz_port_setup(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
@@ -2562,6 +2630,23 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_update_port_member(dev, port);
}
+static void ksz_port_teardown(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ switch (dev->chip_id) {
+ case KSZ8563_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ if (dsa_is_user_port(ds, port))
+ ksz9477_port_acl_free(dev, port);
+ }
+}
+
static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
@@ -3107,6 +3192,44 @@ static int ksz_switch_detect(struct ksz_device *dev)
return 0;
}
+static int ksz_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ksz_device *dev = ds->priv;
+
+ switch (dev->chip_id) {
+ case KSZ8563_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ return ksz9477_cls_flower_add(ds, port, cls, ingress);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int ksz_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ksz_device *dev = ds->priv;
+
+ switch (dev->chip_id) {
+ case KSZ8563_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ return ksz9477_cls_flower_del(ds, port, cls, ingress);
+ }
+
+ return -EOPNOTSUPP;
+}
+
/* Bandwidth is calculated by idle slope/transmission speed. Then the Bandwidth
* is converted to Hex-decimal using the successive multiplication method. On
* every step, integer part is taken and decimal part is carry forwarded.
@@ -3419,6 +3542,149 @@ static int ksz_setup_tc(struct dsa_switch *ds, int port,
}
}
+static int ksz_port_set_mac_address(struct dsa_switch *ds, int port,
+ const unsigned char *addr)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+
+ if (dp->hsr_dev) {
+ dev_err(ds->dev,
+ "Cannot change MAC address on port %d with active HSR offload\n",
+ port);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/* Program the switch's MAC address register with the MAC address of the
+ * requesting user port. This single address is used by the switch for multiple
+ * features, like HSR self-address filtering and WoL. Other user ports are
+ * allowed to share ownership of this address as long as their MAC address is
+ * the same. The user ports' MAC addresses must not change while they have
+ * ownership of the switch MAC address.
+ */
+static int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *slave = dsa_to_port(ds, port)->slave;
+ const unsigned char *addr = slave->dev_addr;
+ struct ksz_switch_macaddr *switch_macaddr;
+ struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ int i;
+
+ /* Make sure concurrent MAC address changes are blocked */
+ ASSERT_RTNL();
+
+ switch_macaddr = dev->switch_macaddr;
+ if (switch_macaddr) {
+ if (!ether_addr_equal(switch_macaddr->addr, addr)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Switch already configured for MAC address %pM",
+ switch_macaddr->addr);
+ return -EBUSY;
+ }
+
+ refcount_inc(&switch_macaddr->refcount);
+ return 0;
+ }
+
+ switch_macaddr = kzalloc(sizeof(*switch_macaddr), GFP_KERNEL);
+ if (!switch_macaddr)
+ return -ENOMEM;
+
+ ether_addr_copy(switch_macaddr->addr, addr);
+ refcount_set(&switch_macaddr->refcount, 1);
+ dev->switch_macaddr = switch_macaddr;
+
+ /* Program the switch MAC address to hardware */
+ for (i = 0; i < ETH_ALEN; i++)
+ ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i, addr[i]);
+
+ return 0;
+}
+
+static void ksz_switch_macaddr_put(struct dsa_switch *ds)
+{
+ struct ksz_switch_macaddr *switch_macaddr;
+ struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ int i;
+
+ /* Make sure concurrent MAC address changes are blocked */
+ ASSERT_RTNL();
+
+ switch_macaddr = dev->switch_macaddr;
+ if (!refcount_dec_and_test(&switch_macaddr->refcount))
+ return;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i, 0);
+
+ dev->switch_macaddr = NULL;
+ kfree(switch_macaddr);
+}
+
+static int ksz_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ enum hsr_version ver;
+ int ret;
+
+ ret = hsr_get_version(hsr, &ver);
+ if (ret)
+ return ret;
+
+ if (dev->chip_id != KSZ9477_CHIP_ID) {
+ NL_SET_ERR_MSG_MOD(extack, "Chip does not support HSR offload");
+ return -EOPNOTSUPP;
+ }
+
+ /* KSZ9477 can support HW offloading of only 1 HSR device */
+ if (dev->hsr_dev && hsr != dev->hsr_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload supported for a single HSR");
+ return -EOPNOTSUPP;
+ }
+
+ /* KSZ9477 only supports HSR v0 and v1 */
+ if (!(ver == HSR_V0 || ver == HSR_V1)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only HSR v0 and v1 supported");
+ return -EOPNOTSUPP;
+ }
+
+ /* Self MAC address filtering, to avoid frames traversing
+ * the HSR ring more than once.
+ */
+ ret = ksz_switch_macaddr_get(ds, port, extack);
+ if (ret)
+ return ret;
+
+ ksz9477_hsr_join(ds, port, hsr);
+ dev->hsr_dev = hsr;
+ dev->hsr_ports |= BIT(port);
+
+ return 0;
+}
+
+static int ksz_hsr_leave(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ struct ksz_device *dev = ds->priv;
+
+ WARN_ON(dev->chip_id != KSZ9477_CHIP_ID);
+
+ ksz9477_hsr_leave(ds, port, hsr);
+ dev->hsr_ports &= ~BIT(port);
+ if (!dev->hsr_ports)
+ dev->hsr_dev = NULL;
+
+ ksz_switch_macaddr_put(ds);
+
+ return 0;
+}
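
As a usage sketch (the port names are assumptions), the HSR offload path is
exercised by enslaving two switch ports to an HSR device:

	ip link add name hsr0 type hsr slave1 lan1 slave2 lan2 version 1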
+
static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
.connect_tag_protocol = ksz_connect_tag_protocol,
@@ -3431,14 +3697,18 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.phylink_mac_config = ksz_phylink_mac_config,
.phylink_mac_link_up = ksz_phylink_mac_link_up,
.phylink_mac_link_down = ksz_mac_link_down,
- .port_enable = ksz_enable_port,
+ .port_setup = ksz_port_setup,
.set_ageing_time = ksz_set_ageing_time,
.get_strings = ksz_get_strings,
.get_ethtool_stats = ksz_get_ethtool_stats,
.get_sset_count = ksz_sset_count,
.port_bridge_join = ksz_port_bridge_join,
.port_bridge_leave = ksz_port_bridge_leave,
+ .port_hsr_join = ksz_hsr_join,
+ .port_hsr_leave = ksz_hsr_leave,
+ .port_set_mac_address = ksz_port_set_mac_address,
.port_stp_state_set = ksz_port_stp_state_set,
+ .port_teardown = ksz_port_teardown,
.port_pre_bridge_flags = ksz_port_pre_bridge_flags,
.port_bridge_flags = ksz_port_bridge_flags,
.port_fast_age = ksz_port_fast_age,
@@ -3461,6 +3731,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_hwtstamp_set = ksz_hwtstamp_set,
.port_txtstamp = ksz_port_txtstamp,
.port_rxtstamp = ksz_port_rxtstamp,
+ .cls_flower_add = ksz_cls_flower_add,
+ .cls_flower_del = ksz_cls_flower_del,
.port_setup_tc = ksz_setup_tc,
.get_mac_eee = ksz_get_mac_eee,
.set_mac_eee = ksz_set_mac_eee,
@@ -3530,6 +3802,245 @@ static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num,
dev->ports[port_num].rgmii_tx_val = tx_delay;
}
+/**
+ * ksz_drive_strength_to_reg() - Convert drive strength value to corresponding
+ * register value.
+ * @array: The array of drive strength values to search.
+ * @array_size: The size of the array.
+ * @microamp: The drive strength value in microamp to be converted.
+ *
+ * This function searches the array of drive strength values for the given
+ * microamp value and returns the corresponding register value.
+ *
+ * Returns: If found, the corresponding register value for that drive strength
+ * is returned. Otherwise, -EINVAL is returned indicating an invalid value.
+ */
+static int ksz_drive_strength_to_reg(const struct ksz_drive_strength *array,
+ size_t array_size, int microamp)
+{
+ int i;
+
+ for (i = 0; i < array_size; i++) {
+ if (array[i].microamp == microamp)
+ return array[i].reg_val;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * ksz_drive_strength_error() - Report invalid drive strength value
+ * @dev: ksz device
+ * @array: The array of drive strength values to search.
+ * @array_size: The size of the array.
+ * @microamp: Invalid drive strength value in microamp
+ *
+ * This function logs an error message when an unsupported drive strength value
+ * is detected. It lists out all the supported drive strength values for
+ * reference in the error message.
+ */
+static void ksz_drive_strength_error(struct ksz_device *dev,
+ const struct ksz_drive_strength *array,
+ size_t array_size, int microamp)
+{
+ char supported_values[100];
+ size_t remaining_size;
+ int added_len;
+ char *ptr;
+ int i;
+
+ remaining_size = sizeof(supported_values);
+ ptr = supported_values;
+
+ for (i = 0; i < array_size; i++) {
+ added_len = snprintf(ptr, remaining_size,
+ i == 0 ? "%d" : ", %d", array[i].microamp);
+
+ if (added_len >= remaining_size)
+ break;
+
+ ptr += added_len;
+ remaining_size -= added_len;
+ }
+
+ dev_err(dev->dev, "Invalid drive strength %d, supported values are %s\n",
+ microamp, supported_values);
+}
+
+/**
+ * ksz9477_drive_strength_write() - Set the drive strength for specific KSZ9477
+ * chip variants.
+ * @dev: ksz device
+ * @props: Array of drive strength properties to be applied
+ * @num_props: Number of properties in the array
+ *
+ * This function configures the drive strength for various KSZ9477 chip variants
+ * based on the provided properties. It handles chip-specific nuances and
+ * ensures only valid drive strengths are written to the respective chip.
+ *
+ * Return: 0 on successful configuration, a negative error code on failure.
+ */
+static int ksz9477_drive_strength_write(struct ksz_device *dev,
+ struct ksz_driver_strength_prop *props,
+ int num_props)
+{
+ size_t array_size = ARRAY_SIZE(ksz9477_drive_strengths);
+ int i, ret, reg;
+ u8 mask = 0;
+ u8 val = 0;
+
+ if (props[KSZ_DRIVER_STRENGTH_IO].value != -1)
+ dev_warn(dev->dev, "%s is not supported by this chip variant\n",
+ props[KSZ_DRIVER_STRENGTH_IO].name);
+
+ if (dev->chip_id == KSZ8795_CHIP_ID ||
+ dev->chip_id == KSZ8794_CHIP_ID ||
+ dev->chip_id == KSZ8765_CHIP_ID)
+ reg = KSZ8795_REG_SW_CTRL_20;
+ else
+ reg = KSZ9477_REG_SW_IO_STRENGTH;
+
+ for (i = 0; i < num_props; i++) {
+ if (props[i].value == -1)
+ continue;
+
+ ret = ksz_drive_strength_to_reg(ksz9477_drive_strengths,
+ array_size, props[i].value);
+ if (ret < 0) {
+ ksz_drive_strength_error(dev, ksz9477_drive_strengths,
+ array_size, props[i].value);
+ return ret;
+ }
+
+ mask |= SW_DRIVE_STRENGTH_M << props[i].offset;
+ val |= ret << props[i].offset;
+ }
+
+ return ksz_rmw8(dev, reg, mask, val);
+}
+
+/**
+ * ksz8830_drive_strength_write() - Set the drive strength configuration for
+ * KSZ8830 compatible chip variants.
+ * @dev: ksz device
+ * @props: Array of drive strength properties to be set
+ * @num_props: Number of properties in the array
+ *
+ * This function applies the specified drive strength settings to KSZ8830 chip
+ * variants (KSZ8873, KSZ8863).
+ * It ensures the configurations align with what the chip variant supports and
+ * warns or errors out on unsupported settings.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int ksz8830_drive_strength_write(struct ksz_device *dev,
+ struct ksz_driver_strength_prop *props,
+ int num_props)
+{
+ size_t array_size = ARRAY_SIZE(ksz8830_drive_strengths);
+ int microamp;
+ int i, ret;
+
+ for (i = 0; i < num_props; i++) {
+ if (props[i].value == -1 || i == KSZ_DRIVER_STRENGTH_IO)
+ continue;
+
+ dev_warn(dev->dev, "%s is not supported by this chip variant\n",
+ props[i].name);
+ }
+
+ microamp = props[KSZ_DRIVER_STRENGTH_IO].value;
+ ret = ksz_drive_strength_to_reg(ksz8830_drive_strengths, array_size,
+ microamp);
+ if (ret < 0) {
+ ksz_drive_strength_error(dev, ksz8830_drive_strengths,
+ array_size, microamp);
+ return ret;
+ }
+
+ return ksz_rmw8(dev, KSZ8873_REG_GLOBAL_CTRL_12,
+ KSZ8873_DRIVE_STRENGTH_16MA, ret);
+}
+
+/**
+ * ksz_parse_drive_strength() - Extract and apply drive strength configurations
+ * from device tree properties.
+ * @dev: ksz device
+ *
+ * This function reads the specified drive strength properties from the
+ * device tree, validates against the supported chip variants, and sets
+ * them accordingly. Errors are treated as critical here, since the drive
+ * strength settings are crucial for EMI compliance.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int ksz_parse_drive_strength(struct ksz_device *dev)
+{
+ struct ksz_driver_strength_prop of_props[] = {
+ [KSZ_DRIVER_STRENGTH_HI] = {
+ .name = "microchip,hi-drive-strength-microamp",
+ .offset = SW_HI_SPEED_DRIVE_STRENGTH_S,
+ .value = -1,
+ },
+ [KSZ_DRIVER_STRENGTH_LO] = {
+ .name = "microchip,lo-drive-strength-microamp",
+ .offset = SW_LO_SPEED_DRIVE_STRENGTH_S,
+ .value = -1,
+ },
+ [KSZ_DRIVER_STRENGTH_IO] = {
+ .name = "microchip,io-drive-strength-microamp",
+ .offset = 0, /* don't care */
+ .value = -1,
+ },
+ };
+ struct device_node *np = dev->dev->of_node;
+ bool have_any_prop = false;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(of_props); i++) {
+ ret = of_property_read_u32(np, of_props[i].name,
+ &of_props[i].value);
+ if (ret && ret != -EINVAL)
+ dev_warn(dev->dev, "Failed to read %s\n",
+ of_props[i].name);
+ if (ret)
+ continue;
+
+ have_any_prop = true;
+ }
+
+ if (!have_any_prop)
+ return 0;
+
+ switch (dev->chip_id) {
+ case KSZ8830_CHIP_ID:
+ return ksz8830_drive_strength_write(dev, of_props,
+ ARRAY_SIZE(of_props));
+ case KSZ8795_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8765_CHIP_ID:
+ case KSZ8563_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ return ksz9477_drive_strength_write(dev, of_props,
+ ARRAY_SIZE(of_props));
+ default:
+ for (i = 0; i < ARRAY_SIZE(of_props); i++) {
+ if (of_props[i].value == -1)
+ continue;
+
+ dev_warn(dev->dev, "%s is not supported by this chip variant\n",
+ of_props[i].name);
+ }
+ }
+
+ return 0;
+}
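
For reference, a hedged device tree fragment using the new properties (the
values are picked from the KSZ8795CLX recommendations quoted earlier, and the
node label is a placeholder):

	&switch0 {
		microchip,hi-drive-strength-microamp = <16000>;
		microchip,lo-drive-strength-microamp = <8000>;
	};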
+
int ksz_switch_register(struct ksz_device *dev)
{
const struct ksz_chip_data *info;
@@ -3612,6 +4123,10 @@ int ksz_switch_register(struct ksz_device *dev)
for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
+ ret = ksz_parse_drive_strength(dev);
+ if (ret)
+ return ret;
+
ret = of_get_phy_mode(dev->dev->of_node, &interface);
if (ret == 0)
dev->compat_interface = interface;
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index a4de58847dea..8842efca0871 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -101,6 +101,11 @@ struct ksz_ptp_irq {
int num;
};
+struct ksz_switch_macaddr {
+ unsigned char addr[ETH_ALEN];
+ refcount_t refcount;
+};
+
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
bool learning;
@@ -117,6 +122,7 @@ struct ksz_port {
u32 rgmii_tx_val;
u32 rgmii_rx_val;
struct ksz_device *ksz_dev;
+ void *acl_priv;
struct ksz_irq pirq;
u8 num;
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
@@ -169,6 +175,10 @@ struct ksz_device {
struct mutex lock_irq; /* IRQ Access */
struct ksz_irq girq;
struct ksz_ptp_data ptp_data;
+
+ struct ksz_switch_macaddr *switch_macaddr;
+ struct net_device *hsr_dev; /* HSR */
+ u8 hsr_ports;
};
/* List of supported models */
@@ -211,6 +221,7 @@ enum ksz_chip_id {
};
enum ksz_regs {
+ REG_SW_MAC_ADDR,
REG_IND_CTRL_0,
REG_IND_DATA_8,
REG_IND_DATA_CHECK,
@@ -689,6 +700,26 @@ static inline int is_lan937x(struct ksz_device *dev)
#define KSZ8_LEGAL_PACKET_SIZE 1518
#define KSZ9477_MAX_FRAME_SIZE 9000
+#define KSZ8873_REG_GLOBAL_CTRL_12 0x0e
+/* Drive Strength of I/O Pad
+ * 0: 8mA, 1: 16mA
+ */
+#define KSZ8873_DRIVE_STRENGTH_16MA BIT(6)
+
+#define KSZ8795_REG_SW_CTRL_20 0xa3
+#define KSZ9477_REG_SW_IO_STRENGTH 0x010d
+#define SW_DRIVE_STRENGTH_M 0x7
+#define SW_DRIVE_STRENGTH_2MA 0
+#define SW_DRIVE_STRENGTH_4MA 1
+#define SW_DRIVE_STRENGTH_8MA 2
+#define SW_DRIVE_STRENGTH_12MA 3
+#define SW_DRIVE_STRENGTH_16MA 4
+#define SW_DRIVE_STRENGTH_20MA 5
+#define SW_DRIVE_STRENGTH_24MA 6
+#define SW_DRIVE_STRENGTH_28MA 7
+#define SW_HI_SPEED_DRIVE_STRENGTH_S 4
+#define SW_LO_SPEED_DRIVE_STRENGTH_S 0
+
#define KSZ9477_REG_PORT_OUT_RATE_0 0x0420
#define KSZ9477_OUT_RATE_NO_LIMIT 0
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
index 0a6a2fe34e64..b74a230a3f13 100644
--- a/drivers/net/dsa/mt7530-mmio.c
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -63,15 +63,12 @@ mt7988_probe(struct platform_device *pdev)
return dsa_register_switch(priv->ds);
}
-static int
-mt7988_remove(struct platform_device *pdev)
+static void mt7988_remove(struct platform_device *pdev)
{
struct mt7530_priv *priv = platform_get_drvdata(pdev);
if (priv)
mt7530_remove_common(priv);
-
- return 0;
}
static void mt7988_shutdown(struct platform_device *pdev)
@@ -88,7 +85,7 @@ static void mt7988_shutdown(struct platform_device *pdev)
static struct platform_driver mt7988_platform_driver = {
.probe = mt7988_probe,
- .remove = mt7988_remove,
+ .remove_new = mt7988_remove,
.shutdown = mt7988_shutdown,
.driver = {
.name = "mt7530-mmio",
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 035a34b50f31..0d62c69dfbb6 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2824,15 +2824,6 @@ static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
-static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs,
- unsigned int mode,
- phy_interface_t interface,
- int speed, int duplex)
-{
- if (pcs->ops->pcs_link_up)
- pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex);
-}
-
static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
@@ -2921,8 +2912,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
return ret;
mt7530_write(priv, MT7530_PMCR_P(port),
PMCR_CPU_PORT_SETTING(priv->id));
- mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED,
- interface, speed, DUPLEX_FULL);
mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
speed, DUPLEX_FULL, true, true);
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
index ba373656bfe1..9a8429f5d09c 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -208,7 +208,7 @@ static void mv88e639x_sgmii_pcs_pre_config(struct phylink_pcs *pcs,
static int mv88e6390_erratum_3_14(struct mv88e639x_pcs *mpcs)
{
- const int lanes[] = { MV88E6390_PORT9_LANE0, MV88E6390_PORT9_LANE1,
+ static const int lanes[] = { MV88E6390_PORT9_LANE0, MV88E6390_PORT9_LANE1,
MV88E6390_PORT9_LANE2, MV88E6390_PORT9_LANE3,
MV88E6390_PORT10_LANE0, MV88E6390_PORT10_LANE1,
MV88E6390_PORT10_LANE2, MV88E6390_PORT10_LANE3 };
diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c
index c29bee5a5c48..22187d831c4b 100644
--- a/drivers/net/dsa/ocelot/ocelot_ext.c
+++ b/drivers/net/dsa/ocelot/ocelot_ext.c
@@ -115,19 +115,17 @@ err_free_felix:
return err;
}
-static int ocelot_ext_remove(struct platform_device *pdev)
+static void ocelot_ext_remove(struct platform_device *pdev)
{
struct felix *felix = dev_get_drvdata(&pdev->dev);
if (!felix)
- return 0;
+ return;
dsa_unregister_switch(felix->ds);
kfree(felix->ds);
kfree(felix);
-
- return 0;
}
static void ocelot_ext_shutdown(struct platform_device *pdev)
@@ -154,7 +152,7 @@ static struct platform_driver ocelot_ext_switch_driver = {
.of_match_table = ocelot_ext_switch_of_match,
},
.probe = ocelot_ext_probe,
- .remove = ocelot_ext_remove,
+ .remove_new = ocelot_ext_remove,
.shutdown = ocelot_ext_shutdown,
};
module_platform_driver(ocelot_ext_switch_driver);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 8f912bda120b..049930da0521 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1029,19 +1029,17 @@ err_alloc_felix:
return err;
}
-static int seville_remove(struct platform_device *pdev)
+static void seville_remove(struct platform_device *pdev)
{
struct felix *felix = platform_get_drvdata(pdev);
if (!felix)
- return 0;
+ return;
dsa_unregister_switch(felix->ds);
kfree(felix->ds);
kfree(felix);
-
- return 0;
}
static void seville_shutdown(struct platform_device *pdev)
@@ -1064,7 +1062,7 @@ MODULE_DEVICE_TABLE(of, seville_of_match);
static struct platform_driver seville_vsc9953_driver = {
.probe = seville_probe,
- .remove = seville_remove,
+ .remove_new = seville_remove,
.shutdown = seville_shutdown,
.driver = {
.name = "mscc_seville",
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index ff13563059c5..bfd11591faf4 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -506,12 +506,12 @@ static int realtek_smi_probe(struct platform_device *pdev)
return 0;
}
-static int realtek_smi_remove(struct platform_device *pdev)
+static void realtek_smi_remove(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
- return 0;
+ return;
dsa_unregister_switch(priv->ds);
if (priv->slave_mii_bus)
@@ -520,8 +520,6 @@ static int realtek_smi_remove(struct platform_device *pdev)
/* leave the device reset asserted */
if (priv->reset)
gpiod_set_value(priv->reset, 1);
-
- return 0;
}
static void realtek_smi_shutdown(struct platform_device *pdev)
@@ -559,7 +557,7 @@ static struct platform_driver realtek_smi_driver = {
.of_match_table = realtek_smi_of_match,
},
.probe = realtek_smi_probe,
- .remove = realtek_smi_remove,
+ .remove_new = realtek_smi_remove,
.shutdown = realtek_smi_shutdown,
};
module_platform_driver(realtek_smi_driver);
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 7868ef237f6c..b39b719a5b8f 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -95,12 +95,6 @@
#define RTL8366RB_PAACR_RX_PAUSE BIT(6)
#define RTL8366RB_PAACR_AN BIT(7)
-#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \
- RTL8366RB_PAACR_FULL_DUPLEX | \
- RTL8366RB_PAACR_LINK_UP | \
- RTL8366RB_PAACR_TX_PAUSE | \
- RTL8366RB_PAACR_RX_PAUSE)
-
/* bits 0..7 = port 0, bits 8..15 = port 1 */
#define RTL8366RB_PSTAT0 0x0014
/* bits 0..7 = port 2, bits 8..15 = port 3 */
@@ -1081,29 +1075,61 @@ rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
struct realtek_priv *priv = ds->priv;
+ unsigned int val;
int ret;
+ /* Allow forcing the mode on the fixed CPU port, no autonegotiation.
+ * We assume autonegotiation works on the PHY-facing ports.
+ */
if (port != priv->cpu_port)
return;
dev_dbg(priv->dev, "MAC link up on CPU port (%d)\n", port);
- /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG,
BIT(port), BIT(port));
if (ret) {
- dev_err(priv->dev, "failed to force 1Gbit on CPU port\n");
+ dev_err(priv->dev, "failed to force CPU port\n");
return;
}
+ /* Conjure port config */
+ switch (speed) {
+ case SPEED_10:
+ val = RTL8366RB_PAACR_SPEED_10M;
+ break;
+ case SPEED_100:
+ val = RTL8366RB_PAACR_SPEED_100M;
+ break;
+ case SPEED_1000:
+ default:
+ val = RTL8366RB_PAACR_SPEED_1000M;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ val |= RTL8366RB_PAACR_FULL_DUPLEX;
+
+ if (tx_pause)
+ val |= RTL8366RB_PAACR_TX_PAUSE;
+
+ if (rx_pause)
+ val |= RTL8366RB_PAACR_RX_PAUSE;
+
+ val |= RTL8366RB_PAACR_LINK_UP;
+
ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2,
0xFF00U,
- RTL8366RB_PAACR_CPU_PORT << 8);
+ val << 8);
if (ret) {
dev_err(priv->dev, "failed to set PAACR on CPU port\n");
return;
}
+ dev_dbg(priv->dev, "set PAACR to %04x\n", val);
+
/* Enable the CPU port */
ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 2eda10b33f2e..10092ea85e46 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -1272,19 +1272,17 @@ free_pcs:
return ret;
}
-static int a5psw_remove(struct platform_device *pdev)
+static void a5psw_remove(struct platform_device *pdev)
{
struct a5psw *a5psw = platform_get_drvdata(pdev);
if (!a5psw)
- return 0;
+ return;
dsa_unregister_switch(&a5psw->ds);
a5psw_pcs_free(a5psw);
clk_disable_unprepare(a5psw->hclk);
clk_disable_unprepare(a5psw->clk);
-
- return 0;
}
static void a5psw_shutdown(struct platform_device *pdev)
@@ -1311,7 +1309,7 @@ static struct platform_driver a5psw_driver = {
.of_match_table = a5psw_of_mtable,
},
.probe = a5psw_probe,
- .remove = a5psw_remove,
+ .remove_new = a5psw_remove,
.shutdown = a5psw_shutdown,
};
module_platform_driver(a5psw_driver);
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index e3699f76f6d7..08a3e7b96254 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -153,14 +153,14 @@ static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_tx_clk;
- const int mac_clk_sources[] = {
+ static const int mac_clk_sources[] = {
CLKSRC_MII0_TX_CLK,
CLKSRC_MII1_TX_CLK,
CLKSRC_MII2_TX_CLK,
CLKSRC_MII3_TX_CLK,
CLKSRC_MII4_TX_CLK,
};
- const int phy_clk_sources[] = {
+ static const int phy_clk_sources[] = {
CLKSRC_IDIV0,
CLKSRC_IDIV1,
CLKSRC_IDIV2,
@@ -194,7 +194,7 @@ sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_rx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
- const int clk_sources[] = {
+ static const int clk_sources[] = {
CLKSRC_MII0_RX_CLK,
CLKSRC_MII1_RX_CLK,
CLKSRC_MII2_RX_CLK,
@@ -221,7 +221,7 @@ sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
- const int clk_sources[] = {
+ static const int clk_sources[] = {
CLKSRC_IDIV0,
CLKSRC_IDIV1,
CLKSRC_IDIV2,
@@ -248,7 +248,7 @@ sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
- const int clk_sources[] = {
+ static const int clk_sources[] = {
CLKSRC_IDIV0,
CLKSRC_IDIV1,
CLKSRC_IDIV2,
@@ -349,8 +349,13 @@ static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
clksrc = CLKSRC_PLL0;
} else {
- int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
- CLKSRC_IDIV3, CLKSRC_IDIV4};
+ static const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
clksrc = clk_sources[port];
}
@@ -638,7 +643,7 @@ static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl ref_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
- const int clk_sources[] = {
+ static const int clk_sources[] = {
CLKSRC_MII0_TX_CLK,
CLKSRC_MII1_TX_CLK,
CLKSRC_MII2_TX_CLK,
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index bd4206e8f9af..755b7895a15a 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -112,16 +112,14 @@ static int vsc73xx_platform_probe(struct platform_device *pdev)
return vsc73xx_probe(&vsc_platform->vsc);
}
-static int vsc73xx_platform_remove(struct platform_device *pdev)
+static void vsc73xx_platform_remove(struct platform_device *pdev)
{
struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
if (!vsc_platform)
- return 0;
+ return;
vsc73xx_remove(&vsc_platform->vsc);
-
- return 0;
}
static void vsc73xx_platform_shutdown(struct platform_device *pdev)
@@ -160,7 +158,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
static struct platform_driver vsc73xx_platform_driver = {
.probe = vsc73xx_platform_probe,
- .remove = vsc73xx_platform_remove,
+ .remove_new = vsc73xx_platform_remove,
.shutdown = vsc73xx_platform_shutdown,
.driver = {
.name = "vsc73xx-platform",
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 753fef757f11..5b02e9e426fd 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -548,7 +548,8 @@ static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
}
static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
- struct net_device *hsr)
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack)
{
unsigned int val = XRS_HSR_CFG_HSR_PRP;
struct dsa_port *partner = NULL, *dp;
@@ -562,16 +563,21 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
if (ret)
return ret;
- /* Only ports 1 and 2 can be HSR/PRP redundant ports. */
- if (port != 1 && port != 2)
+ if (port != 1 && port != 2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only ports 1 and 2 can offload HSR/PRP");
return -EOPNOTSUPP;
+ }
- if (ver == HSR_V1)
+ if (ver == HSR_V1) {
val |= XRS_HSR_CFG_HSR;
- else if (ver == PRP_V1)
+ } else if (ver == PRP_V1) {
val |= XRS_HSR_CFG_PRP;
- else
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only HSR v1 and PRP v1 can be offloaded");
return -EOPNOTSUPP;
+ }
dsa_hsr_foreach_port(dp, ds, hsr) {
if (dp->index != port) {
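Editor's note: passing the netlink extended ack down to the HSR offload hook lets the driver tell userspace why an offload was refused instead of returning a bare -EOPNOTSUPP. A minimal sketch of the pattern, with a hypothetical hook and check; NL_SET_ERR_MSG_MOD attaches the string to the ack that travels back alongside the errno:

#include <linux/netlink.h>

static int foo_offload(int port, struct netlink_ext_ack *extack)
{
	if (port != 1 && port != 2) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only ports 1 and 2 can offload HSR/PRP");
		return -EOPNOTSUPP;
	}
	return 0;
}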
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index af603256b724..2874680ef24d 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -811,7 +811,7 @@ static int ax_init_dev(struct net_device *dev)
return ret;
}
-static int ax_remove(struct platform_device *pdev)
+static void ax_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ei_device *ei_local = netdev_priv(dev);
@@ -832,8 +832,6 @@ static int ax_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
free_netdev(dev);
-
- return 0;
}
/*
@@ -1011,7 +1009,7 @@ static struct platform_driver axdrv = {
.name = "ax88796",
},
.probe = ax_probe,
- .remove = ax_remove,
+ .remove_new = ax_remove,
.suspend = ax_suspend,
.resume = ax_resume,
};
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 217838b28220..5a0fa995e643 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -441,7 +441,7 @@ static int mcf8390_probe(struct platform_device *pdev)
return 0;
}
-static int mcf8390_remove(struct platform_device *pdev)
+static void mcf8390_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct resource *mem;
@@ -450,7 +450,6 @@ static int mcf8390_remove(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
free_netdev(dev);
- return 0;
}
static struct platform_driver mcf8390_drv = {
@@ -458,7 +457,7 @@ static struct platform_driver mcf8390_drv = {
.name = "mcf8390",
},
.probe = mcf8390_probe,
- .remove = mcf8390_remove,
+ .remove_new = mcf8390_remove,
};
module_platform_driver(mcf8390_drv);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 7d89ec1cf273..350683a09d2e 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -823,7 +823,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
return 0;
}
-static int ne_drv_remove(struct platform_device *pdev)
+static void ne_drv_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -842,7 +842,6 @@ static int ne_drv_remove(struct platform_device *pdev)
release_region(dev->base_addr, NE_IO_EXTENT);
free_netdev(dev);
}
- return 0;
}
/* Remove unused devices or all if true. */
@@ -895,7 +894,7 @@ static int ne_drv_resume(struct platform_device *pdev)
#endif
static struct platform_driver ne_driver = {
- .remove = ne_drv_remove,
+ .remove_new = ne_drv_remove,
.suspend = ne_drv_suspend,
.resume = ne_drv_resume,
.driver = {
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index c6f8f852bff1..e03193da5874 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1582,15 +1582,13 @@ static int owl_emac_probe(struct platform_device *pdev)
return 0;
}
-static int owl_emac_remove(struct platform_device *pdev)
+static void owl_emac_remove(struct platform_device *pdev)
{
struct owl_emac_priv *priv = platform_get_drvdata(pdev);
netif_napi_del(&priv->napi);
phy_disconnect(priv->netdev->phydev);
cancel_work_sync(&priv->mac_reset_task);
-
- return 0;
}
static const struct of_device_id owl_emac_of_match[] = {
@@ -1609,7 +1607,7 @@ static struct platform_driver owl_emac_driver = {
.pm = &owl_emac_pm_ops,
},
.probe = owl_emac_probe,
- .remove = owl_emac_remove,
+ .remove_new = owl_emac_remove,
};
module_platform_driver(owl_emac_driver);
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 597a02c75d52..27af7746d645 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1525,7 +1525,7 @@ error1:
return err;
}
-static int greth_of_remove(struct platform_device *of_dev)
+static void greth_of_remove(struct platform_device *of_dev)
{
struct net_device *ndev = platform_get_drvdata(of_dev);
struct greth_private *greth = netdev_priv(ndev);
@@ -1544,8 +1544,6 @@ static int greth_of_remove(struct platform_device *of_dev)
of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id greth_of_match[] = {
@@ -1566,7 +1564,7 @@ static struct platform_driver greth_of_driver = {
.of_match_table = greth_of_match,
},
.probe = greth_of_probe,
- .remove = greth_of_remove,
+ .remove_new = greth_of_remove,
};
module_platform_driver(greth_of_driver);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index a94c62956eed..d761c08fe5c1 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -1083,7 +1083,7 @@ out:
return ret;
}
-static int emac_remove(struct platform_device *pdev)
+static void emac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_board_info *db = netdev_priv(ndev);
@@ -1101,7 +1101,6 @@ static int emac_remove(struct platform_device *pdev)
free_netdev(ndev);
dev_dbg(&pdev->dev, "released and freed device\n");
- return 0;
}
static int emac_suspend(struct platform_device *dev, pm_message_t state)
@@ -1143,7 +1142,7 @@ static struct platform_driver emac_driver = {
.of_match_table = emac_of_match,
},
.probe = emac_probe,
- .remove = emac_remove,
+ .remove_new = emac_remove,
.suspend = emac_suspend,
.resume = emac_resume,
};
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 2e15800e5310..1b1799985d1d 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1464,7 +1464,7 @@ err_free_netdev:
/* Remove Altera TSE MAC device
*/
-static int altera_tse_remove(struct platform_device *pdev)
+static void altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct altera_tse_private *priv = netdev_priv(ndev);
@@ -1476,8 +1476,6 @@ static int altera_tse_remove(struct platform_device *pdev)
lynx_pcs_destroy(priv->pcs);
free_netdev(ndev);
-
- return 0;
}
static const struct altera_dmaops altera_dtype_sgdma = {
@@ -1528,7 +1526,7 @@ MODULE_DEVICE_TABLE(of, altera_tse_ids);
static struct platform_driver altera_tse_driver = {
.probe = altera_tse_probe,
- .remove = altera_tse_remove,
+ .remove_new = altera_tse_remove,
.suspend = NULL,
.resume = NULL,
.driver = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f955bde10cf9..b5bca4814830 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1828,7 +1828,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
}
if (xdp_flags & ENA_XDP_REDIRECT)
- xdp_do_flush_map();
+ xdp_do_flush();
return work_done;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index c5cec4e79489..85c978149bf6 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1323,7 +1323,7 @@ out:
return err;
}
-static int au1000_remove(struct platform_device *pdev)
+static void au1000_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct au1000_private *aup = netdev_priv(dev);
@@ -1359,13 +1359,11 @@ static int au1000_remove(struct platform_device *pdev)
release_mem_region(macen->start, resource_size(macen));
free_netdev(dev);
-
- return 0;
}
static struct platform_driver au1000_eth_driver = {
.probe = au1000_probe,
- .remove = au1000_remove,
+ .remove_new = au1000_remove,
.driver = {
.name = "au1000-eth",
},
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 36f9b932b9e2..2a8643e167e1 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -445,12 +445,13 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
goto err_out_teardown;
/* Set up the VIFs */
- err = pdsc_viftypes_init(pdsc);
- if (err)
- goto err_out_teardown;
+ if (init) {
+ err = pdsc_viftypes_init(pdsc);
+ if (err)
+ goto err_out_teardown;
- if (init)
pdsc_debugfs_add_viftype(pdsc);
+ }
clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
return 0;
@@ -469,8 +470,10 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
pdsc_qcq_free(pdsc, &pdsc->adminqcq);
- kfree(pdsc->viftype_status);
- pdsc->viftype_status = NULL;
+ if (removing) {
+ kfree(pdsc->viftype_status);
+ pdsc->viftype_status = NULL;
+ }
if (pdsc->intr_info) {
for (i = 0; i < pdsc->nintrs; i++)
@@ -512,7 +515,7 @@ void pdsc_stop(struct pdsc *pdsc)
PDS_CORE_INTR_MASK_SET);
}
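Editor's note: taken together, the pdsc_setup()/pdsc_teardown() hunks above change the lifetime of viftype_status. It is now allocated only on initial setup and freed only on final removal, so an FW-recovery teardown/setup cycle keeps the table intact. The lifetime rule as a minimal sketch, with hypothetical names and sizes:

#include <linux/slab.h>

struct ctx { void *table; };	/* hypothetical */
#define TABLE_SIZE 64		/* hypothetical */

static int ctx_setup(struct ctx *c, bool init)
{
	if (init) {
		c->table = kzalloc(TABLE_SIZE, GFP_KERNEL);
		if (!c->table)
			return -ENOMEM;
	}
	return 0;		/* recovery path: keep the table */
}

static void ctx_teardown(struct ctx *c, bool removing)
{
	if (removing) {		/* only on final remove */
		kfree(c->table);
		c->table = NULL;
	}
}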
-static void pdsc_fw_down(struct pdsc *pdsc)
+void pdsc_fw_down(struct pdsc *pdsc)
{
union pds_core_notifyq_comp reset_event = {
.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
@@ -520,10 +523,13 @@ static void pdsc_fw_down(struct pdsc *pdsc)
};
if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
- dev_err(pdsc->dev, "%s: already happening\n", __func__);
+ dev_warn(pdsc->dev, "%s: already happening\n", __func__);
return;
}
+ if (pdsc->pdev->is_virtfn)
+ return;
+
/* Notify clients of fw_down */
if (pdsc->fw_reporter)
devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
@@ -533,7 +539,7 @@ static void pdsc_fw_down(struct pdsc *pdsc)
pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
-static void pdsc_fw_up(struct pdsc *pdsc)
+void pdsc_fw_up(struct pdsc *pdsc)
{
union pds_core_notifyq_comp reset_event = {
.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
@@ -546,6 +552,11 @@ static void pdsc_fw_up(struct pdsc *pdsc)
return;
}
+ if (pdsc->pdev->is_virtfn) {
+ clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
+ return;
+ }
+
err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
if (err)
goto err_out;
@@ -567,6 +578,18 @@ err_out:
pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
+static void pdsc_check_pci_health(struct pdsc *pdsc)
+{
+ u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+
+ /* is PCI broken? */
+ if (fw_status != PDS_RC_BAD_PCI)
+ return;
+
+ pdsc_reset_prepare(pdsc->pdev);
+ pdsc_reset_done(pdsc->pdev);
+}
+
void pdsc_health_thread(struct work_struct *work)
{
struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
@@ -593,6 +616,8 @@ void pdsc_health_thread(struct work_struct *work)
pdsc_fw_down(pdsc);
}
+ pdsc_check_pci_health(pdsc);
+
pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
out_unlock:
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index e545fafc4819..f3a7deda9972 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -283,6 +283,9 @@ int pdsc_devcmd_reset(struct pdsc *pdsc);
int pdsc_dev_reinit(struct pdsc *pdsc);
int pdsc_dev_init(struct pdsc *pdsc);
+void pdsc_reset_prepare(struct pci_dev *pdev);
+void pdsc_reset_done(struct pci_dev *pdev);
+
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
irq_handler_t handler, void *data);
void pdsc_intr_free(struct pdsc *pdsc, int index);
@@ -309,4 +312,8 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data);
int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
struct netlink_ext_ack *extack);
+
+void pdsc_fw_down(struct pdsc *pdsc);
+void pdsc_fw_up(struct pdsc *pdsc);
+
#endif /* _PDSC_H_ */
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index f77cd9f5a2fd..7c1b965d61a9 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -42,6 +42,8 @@ int pdsc_err_to_errno(enum pds_core_status_code code)
return -ERANGE;
case PDS_RC_BAD_ADDR:
return -EFAULT;
+ case PDS_RC_BAD_PCI:
+ return -ENXIO;
case PDS_RC_EOPCODE:
case PDS_RC_EINTR:
case PDS_RC_DEV_CMD:
@@ -62,7 +64,7 @@ bool pdsc_is_fw_running(struct pdsc *pdsc)
/* Firmware is useful only if the running bit is set and
* fw_status != 0xff (bad PCI read)
*/
- return (pdsc->fw_status != 0xff) &&
+ return (pdsc->fw_status != PDS_RC_BAD_PCI) &&
(pdsc->fw_status & PDS_CORE_FW_STS_F_RUNNING);
}
@@ -128,6 +130,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
unsigned long max_wait;
unsigned long duration;
int timeout = 0;
+ bool running;
int done = 0;
int err = 0;
int status;
@@ -136,6 +139,10 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
max_wait = start_time + (max_seconds * HZ);
while (!done && !timeout) {
+ running = pdsc_is_fw_running(pdsc);
+ if (!running)
+ break;
+
done = pdsc_devcmd_done(pdsc);
if (done)
break;
@@ -152,7 +159,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
dev_dbg(dev, "DEVCMD %d %s after %ld secs\n",
opcode, pdsc_devcmd_str(opcode), duration / HZ);
- if (!done || timeout) {
+ if ((!done || timeout) && running) {
dev_err(dev, "DEVCMD %d %s timeout, done %d timeout %d max_seconds=%d\n",
opcode, pdsc_devcmd_str(opcode), done, timeout,
max_seconds);
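Editor's note: the wait loop above now samples the FW-running state on each iteration and bails out quietly when firmware is gone, so a dead device neither burns the full timeout nor logs a spurious DEVCMD timeout. The control flow reduced to a sketch; the struct and helpers are hypothetical stand-ins for the driver's own:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>

struct mydev { struct device *dev; };		/* hypothetical */
static bool fw_is_running(struct mydev *d);	/* hypothetical */
static bool cmd_done(struct mydev *d);		/* hypothetical */

static int devcmd_wait_sketch(struct mydev *d, unsigned long max_wait)
{
	bool running = true;

	while (time_before(jiffies, max_wait)) {
		running = fw_is_running(d);
		if (!running)
			break;		/* FW gone: stop polling quietly */
		if (cmd_done(d))
			return 0;
		msleep(1);
	}
	/* only complain about a timeout if FW was still alive */
	if (running)
		dev_err(d->dev, "DEVCMD timeout\n");
	return -ETIMEDOUT;
}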
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 3a45bf474a19..3080898d7b95 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -445,12 +445,62 @@ static void pdsc_remove(struct pci_dev *pdev)
devlink_free(dl);
}
+void pdsc_reset_prepare(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+
+ pdsc_fw_down(pdsc);
+
+ pci_free_irq_vectors(pdev);
+ pdsc_unmap_bars(pdsc);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+void pdsc_reset_done(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+ struct device *dev = pdsc->dev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err));
+ return;
+ }
+ pci_set_master(pdev);
+
+ if (!pdev->is_virtfn) {
+ pcie_print_link_status(pdsc->pdev);
+
+ err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME);
+ if (err) {
+ dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n",
+ ERR_PTR(err));
+ return;
+ }
+
+ err = pdsc_map_bars(pdsc);
+ if (err)
+ return;
+ }
+
+ pdsc_fw_up(pdsc);
+}
+
+static const struct pci_error_handlers pdsc_err_handler = {
+ /* FLR handling */
+ .reset_prepare = pdsc_reset_prepare,
+ .reset_done = pdsc_reset_done,
+};
+
static struct pci_driver pdsc_driver = {
.name = PDS_CORE_DRV_NAME,
.id_table = pdsc_id_table,
.probe = pdsc_probe,
.remove = pdsc_remove,
.sriov_configure = pdsc_sriov_configure,
+ .err_handler = &pdsc_err_handler,
};
void *pdsc_get_pf_struct(struct pci_dev *vf_pdev)
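Editor's note: wiring .reset_prepare/.reset_done into pci_error_handlers is what lets the PCI core call back into the driver around a function-level reset: prepare quiesces and releases the device, done re-enables and restores it. The registration shape in isolation, with hypothetical "foo" names:

#include <linux/pci.h>

static void foo_reset_prepare(struct pci_dev *pdev)
{
	/* quiesce I/O, free IRQ vectors, unmap BARs, disable device */
}

static void foo_reset_done(struct pci_dev *pdev)
{
	/* re-enable the device, restore BARs/IRQs, restart I/O */
}

static const struct pci_error_handlers foo_err_handler = {
	.reset_prepare = foo_reset_prepare,
	.reset_done    = foo_reset_done,
};

Triggering a reset, for example via the sysfs reset attribute (echo 1 > /sys/bus/pci/devices/.../reset), exercises this pair of hooks.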
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 33bb539ad70a..c78706d21a6a 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1487,7 +1487,7 @@ static int sunlance_sbus_probe(struct platform_device *op)
return err;
}
-static int sunlance_sbus_remove(struct platform_device *op)
+static void sunlance_sbus_remove(struct platform_device *op)
{
struct lance_private *lp = platform_get_drvdata(op);
struct net_device *net_dev = lp->dev;
@@ -1497,8 +1497,6 @@ static int sunlance_sbus_remove(struct platform_device *op)
lance_free_hwresources(lp);
free_netdev(net_dev);
-
- return 0;
}
static const struct of_device_id sunlance_sbus_match[] = {
@@ -1516,7 +1514,7 @@ static struct platform_driver sunlance_sbus_driver = {
.of_match_table = sunlance_sbus_match,
},
.probe = sunlance_sbus_probe,
- .remove = sunlance_sbus_remove,
+ .remove_new = sunlance_sbus_remove,
};
module_platform_driver(sunlance_sbus_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 4d790a89fe77..91842a5e161b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -512,7 +512,7 @@ err_alloc:
return ret;
}
-static int xgbe_platform_remove(struct platform_device *pdev)
+static void xgbe_platform_remove(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata = platform_get_drvdata(pdev);
@@ -521,8 +521,6 @@ static int xgbe_platform_remove(struct platform_device *pdev)
platform_device_put(pdata->phy_platdev);
xgbe_free_pdata(pdata);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -615,7 +613,7 @@ static struct platform_driver xgbe_driver = {
.pm = &xgbe_platform_pm_ops,
},
.probe = xgbe_platform_probe,
- .remove = xgbe_platform_remove,
+ .remove_new = xgbe_platform_remove,
};
int xgbe_platform_init(void)
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 379d19d18dbe..9e90c2381491 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -690,7 +690,7 @@ err:
return ret;
}
-static int xge_remove(struct platform_device *pdev)
+static void xge_remove(struct platform_device *pdev)
{
struct xge_pdata *pdata;
struct net_device *ndev;
@@ -706,8 +706,6 @@ static int xge_remove(struct platform_device *pdev)
xge_mdio_remove(ndev);
unregister_netdev(ndev);
free_netdev(ndev);
-
- return 0;
}
static void xge_shutdown(struct platform_device *pdev)
@@ -736,7 +734,7 @@ static struct platform_driver xge_driver = {
.acpi_match_table = ACPI_PTR(xge_acpi_match),
},
.probe = xge_probe,
- .remove = xge_remove,
+ .remove_new = xge_remove,
.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4d4140b7c450..b5d9f9a55b7f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -2127,7 +2127,7 @@ err:
return ret;
}
-static int xgene_enet_remove(struct platform_device *pdev)
+static void xgene_enet_remove(struct platform_device *pdev)
{
struct xgene_enet_pdata *pdata;
struct net_device *ndev;
@@ -2149,8 +2149,6 @@ static int xgene_enet_remove(struct platform_device *pdev)
xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
-
- return 0;
}
static void xgene_enet_shutdown(struct platform_device *pdev)
@@ -2174,7 +2172,7 @@ static struct platform_driver xgene_enet_driver = {
.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
- .remove = xgene_enet_remove,
+ .remove_new = xgene_enet_remove,
.shutdown = xgene_enet_shutdown,
};
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 8775c3234e91..766ab78256fe 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -739,7 +739,7 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");
-static int mac_mace_device_remove(struct platform_device *pdev)
+static void mac_mace_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct mace_data *mp = netdev_priv(dev);
@@ -755,13 +755,11 @@ static int mac_mace_device_remove(struct platform_device *pdev)
mp->tx_ring, mp->tx_ring_phys);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver mac_mace_driver = {
.probe = mace_probe,
- .remove = mac_mace_device_remove,
+ .remove_new = mac_mace_device_remove,
.driver = {
.name = mac_mace_string,
},
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index ce3147e886a1..a3afddb23ee8 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -58,14 +58,12 @@ out_netdev:
return err;
}
-static int emac_arc_remove(struct platform_device *pdev)
+static void emac_arc_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
arc_emac_remove(ndev);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id emac_arc_dt_ids[] = {
@@ -76,7 +74,7 @@ MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
static struct platform_driver emac_arc_driver = {
.probe = emac_arc_probe,
- .remove = emac_arc_remove,
+ .remove_new = emac_arc_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = emac_arc_dt_ids,
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 509101112279..493d6356c8ca 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -244,7 +244,7 @@ out_netdev:
return err;
}
-static int emac_rockchip_remove(struct platform_device *pdev)
+static void emac_rockchip_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct rockchip_priv_data *priv = netdev_priv(ndev);
@@ -260,12 +260,11 @@ static int emac_rockchip_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->macclk);
free_netdev(ndev);
- return 0;
}
static struct platform_driver emac_rockchip_driver = {
.probe = emac_rockchip_probe,
- .remove = emac_rockchip_remove,
+ .remove_new = emac_rockchip_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = emac_rockchip_dt_ids,
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 009e0b3066fa..0f2f400b5bc4 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1968,21 +1968,19 @@ err_put_clk:
return err;
}
-static int ag71xx_remove(struct platform_device *pdev)
+static void ag71xx_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ag71xx *ag;
if (!ndev)
- return 0;
+ return;
ag = netdev_priv(ndev);
unregister_netdev(ndev);
ag71xx_mdio_remove(ag);
clk_disable_unprepare(ag->clk_eth);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static const u32 ar71xx_fifo_ar7100[] = {
@@ -2069,7 +2067,7 @@ static const struct of_device_id ag71xx_match[] = {
static struct platform_driver ag71xx_driver = {
.probe = ag71xx_probe,
- .remove = ag71xx_remove,
+ .remove_new = ag71xx_remove,
.driver = {
.name = "ag71xx",
.of_match_table = ag71xx_match,
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 43d821fe7a54..63ba64dbb731 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -504,15 +504,12 @@ struct atl1c_rrd_ring {
u16 next_to_use;
u16 next_to_clean;
struct napi_struct napi;
- struct page *rx_page;
- unsigned int rx_page_offset;
};
/* board specific private data structure */
struct atl1c_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- unsigned int rx_frag_size;
struct atl1c_hw hw;
struct atl1c_hw_stats hw_stats;
struct mii_if_info mii; /* MII interface info */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 940c5d1ff9cf..46cdc32b4e31 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -483,15 +483,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
struct net_device *dev)
{
- unsigned int head_size;
int mtu = dev->mtu;
adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
-
- head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- adapter->rx_frag_size = roundup_pow_of_two(head_size);
}
static netdev_features_t atl1c_fix_features(struct net_device *netdev,
@@ -847,7 +842,8 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
}
static inline void atl1c_clean_buffer(struct pci_dev *pdev,
- struct atl1c_buffer *buffer_info)
+ struct atl1c_buffer *buffer_info,
+ int budget)
{
u16 pci_direction;
if (buffer_info->flags & ATL1C_BUFFER_FREE)
@@ -866,7 +862,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
buffer_info->length, pci_direction);
}
if (buffer_info->skb)
- dev_consume_skb_any(buffer_info->skb);
+ napi_consume_skb(buffer_info->skb, budget);
buffer_info->dma = 0;
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
@@ -887,7 +883,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
ring_count = tpd_ring->count;
for (index = 0; index < ring_count; index++) {
buffer_info = &tpd_ring->buffer_info[index];
- atl1c_clean_buffer(pdev, buffer_info);
+ atl1c_clean_buffer(pdev, buffer_info, 0);
}
netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
@@ -914,7 +910,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue)
for (j = 0; j < rfd_ring->count; j++) {
buffer_info = &rfd_ring->buffer_info[j];
- atl1c_clean_buffer(pdev, buffer_info);
+ atl1c_clean_buffer(pdev, buffer_info, 0);
}
/* zero out the descriptor ring */
memset(rfd_ring->desc, 0, rfd_ring->size);
@@ -964,7 +960,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- int i;
dma_free_coherent(&pdev->dev, adapter->ring_header.size,
adapter->ring_header.desc, adapter->ring_header.dma);
@@ -977,12 +972,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
kfree(adapter->tpd_ring[0].buffer_info);
adapter->tpd_ring[0].buffer_info = NULL;
}
- for (i = 0; i < adapter->rx_queue_count; ++i) {
- if (adapter->rrd_ring[i].rx_page) {
- put_page(adapter->rrd_ring[i].rx_page);
- adapter->rrd_ring[i].rx_page = NULL;
- }
- }
}
/**
@@ -1619,7 +1608,7 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget)
total_bytes += buffer_info->skb->len;
total_packets++;
}
- atl1c_clean_buffer(pdev, buffer_info);
+ atl1c_clean_buffer(pdev, buffer_info, budget);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
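Editor's note: threading budget through atl1c_clean_buffer() lets skbs freed from NAPI poll use the batched per-CPU deferred-free path, while all other callers pass 0. A minimal sketch of the convention:

#include <linux/skbuff.h>

/* budget > 0  - NAPI poll context; the skb may be bulk-freed.
 * budget == 0 - any other context; napi_consume_skb() falls back
 *               to dev_consume_skb_any().
 */
static void free_tx_skb(struct sk_buff *skb, int budget)
{
	napi_consume_skb(skb, budget);
}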
@@ -1754,48 +1743,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
skb_checksum_none_assert(skb);
}
-static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
- u32 queue, bool napi_mode)
-{
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
- struct sk_buff *skb;
- struct page *page;
-
- if (adapter->rx_frag_size > PAGE_SIZE) {
- if (likely(napi_mode))
- return napi_alloc_skb(&rrd_ring->napi,
- adapter->rx_buffer_len);
- else
- return netdev_alloc_skb_ip_align(adapter->netdev,
- adapter->rx_buffer_len);
- }
-
- page = rrd_ring->rx_page;
- if (!page) {
- page = alloc_page(GFP_ATOMIC);
- if (unlikely(!page))
- return NULL;
- rrd_ring->rx_page = page;
- rrd_ring->rx_page_offset = 0;
- }
-
- skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
- adapter->rx_frag_size);
- if (likely(skb)) {
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- rrd_ring->rx_page_offset += adapter->rx_frag_size;
- if (rrd_ring->rx_page_offset >= PAGE_SIZE)
- rrd_ring->rx_page = NULL;
- else
- get_page(page);
- }
- return skb;
-}
-
static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
bool napi_mode)
{
struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
struct pci_dev *pdev = adapter->pdev;
struct atl1c_buffer *buffer_info, *next_info;
struct sk_buff *skb;
@@ -1814,13 +1766,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
while (next_info->flags & ATL1C_BUFFER_FREE) {
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
- skb = atl1c_alloc_skb(adapter, queue, napi_mode);
+ /* When the DMA RX address is set to something like
+ * 0x....fc0, it is very likely to cause a DMA
+ * RFD overflow issue.
+ *
+ * To work around it, we allocate the rx skb with 64
+ * bytes of extra space, and offset the address whenever
+ * 0x....fc0 is detected.
+ */
+ if (likely(napi_mode))
+ skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64);
+ else
+ skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64);
if (unlikely(!skb)) {
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
break;
}
+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+ skb_reserve(skb, 64);
+
/*
* Make buffer alignment 2 beyond a 16 byte boundary
* this will result in a 16 byte aligned IP header after
@@ -2186,7 +2152,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
while (index != tpd_ring->next_to_use) {
tpd = ATL1C_TPD_DESC(tpd_ring, index);
buffer_info = &tpd_ring->buffer_info[index];
- atl1c_clean_buffer(adpt->pdev, buffer_info);
+ atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
if (++index == tpd_ring->count)
index = 0;
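Editor's note: the replacement RX allocation path above over-allocates by 64 bytes and nudges the data pointer forward whenever the DMA address would land on a ...fc0 offset, which the hardware handles badly. The arithmetic in isolation, as a minimal sketch:

#include <linux/skbuff.h>

/* The skb was allocated with 64 bytes of slack, so moving data
 * forward past the problematic low-12-bit offset never overruns
 * the buffer.
 */
static void fixup_rx_alignment(struct sk_buff *skb)
{
	if (((unsigned long)skb->data & 0xfff) == 0xfc0)
		skb_reserve(skb, 64);
}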
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 41a6098eb0c2..29b04a274d07 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -1344,17 +1344,15 @@ of_put_exit:
return ret;
}
-static int bcmasp_remove(struct platform_device *pdev)
+static void bcmasp_remove(struct platform_device *pdev)
{
struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev);
if (!priv)
- return 0;
+ return;
priv->destroy_wol(priv);
bcmasp_remove_intfs(priv);
-
- return 0;
}
static void bcmasp_shutdown(struct platform_device *pdev)
@@ -1428,7 +1426,7 @@ static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
static struct platform_driver bcmasp_driver = {
.probe = bcmasp_probe,
- .remove = bcmasp_remove,
+ .remove_new = bcmasp_remove,
.shutdown = bcmasp_shutdown,
.driver = {
.name = "brcm,asp-v2",
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 33d86683af50..3e7c8671cd11 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -768,7 +768,7 @@ err_dma_free:
return err;
}
-static int bcm4908_enet_remove(struct platform_device *pdev)
+static void bcm4908_enet_remove(struct platform_device *pdev)
{
struct bcm4908_enet *enet = platform_get_drvdata(pdev);
@@ -776,8 +776,6 @@ static int bcm4908_enet_remove(struct platform_device *pdev)
netif_napi_del(&enet->rx_ring.napi);
netif_napi_del(&enet->tx_ring.napi);
bcm4908_enet_dma_free(enet);
-
- return 0;
}
static const struct of_device_id bcm4908_enet_of_match[] = {
@@ -791,7 +789,7 @@ static struct platform_driver bcm4908_enet_driver = {
.of_match_table = bcm4908_enet_of_match,
},
.probe = bcm4908_enet_probe,
- .remove = bcm4908_enet_remove,
+ .remove_new = bcm4908_enet_remove,
};
module_platform_driver(bcm4908_enet_driver);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a741070f1f9a..105afde5dbe1 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1902,7 +1902,7 @@ out:
/*
* exit func, stops hardware and unregisters netdevice
*/
-static int bcm_enet_remove(struct platform_device *pdev)
+static void bcm_enet_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
@@ -1932,12 +1932,11 @@ static int bcm_enet_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->mac_clk);
free_netdev(dev);
- return 0;
}
static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
- .remove = bcm_enet_remove,
+ .remove_new = bcm_enet_remove,
.driver = {
.name = "bcm63xx_enet",
},
@@ -2739,7 +2738,7 @@ out:
/* exit func, stops hardware and unregisters netdevice */
-static int bcm_enetsw_remove(struct platform_device *pdev)
+static void bcm_enetsw_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
@@ -2752,12 +2751,11 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->mac_clk);
free_netdev(dev);
- return 0;
}
static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
- .remove = bcm_enetsw_remove,
+ .remove_new = bcm_enetsw_remove,
.driver = {
.name = "bcm63xx_enetsw",
},
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index bf1611cce974..ab096795e805 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2648,7 +2648,7 @@ err_free_netdev:
return ret;
}
-static int bcm_sysport_remove(struct platform_device *pdev)
+static void bcm_sysport_remove(struct platform_device *pdev)
{
struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2663,8 +2663,6 @@ static int bcm_sysport_remove(struct platform_device *pdev)
of_phy_deregister_fixed_link(dn);
free_netdev(dev);
dev_set_drvdata(&pdev->dev, NULL);
-
- return 0;
}
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
@@ -2901,7 +2899,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
- .remove = bcm_sysport_remove,
+ .remove_new = bcm_sysport_remove,
.driver = {
.name = "brcm-systemport",
.of_match_table = bcm_sysport_of_match,
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index b4381cd41979..0b21fd5bd457 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -246,13 +246,11 @@ static int bgmac_probe(struct platform_device *pdev)
return bgmac_enet_probe(bgmac);
}
-static int bgmac_remove(struct platform_device *pdev)
+static void bgmac_remove(struct platform_device *pdev)
{
struct bgmac *bgmac = platform_get_drvdata(pdev);
bgmac_enet_remove(bgmac);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -296,7 +294,7 @@ static struct platform_driver bgmac_enet_driver = {
.pm = BGMAC_PM_OPS
},
.probe = bgmac_probe,
- .remove = bgmac_remove,
+ .remove_new = bgmac_remove,
};
module_platform_driver(bgmac_enet_driver);
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 2bc2b707d6ee..ba6c239d52fa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o
bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_coredump.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
+bnxt_en-$(CONFIG_BNXT_HWMON) += bnxt_hwmon.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7551aa8068f8..b0ca3b319e4f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -52,8 +52,6 @@
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>
@@ -71,6 +69,7 @@
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
+#include "bnxt_hwmon.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
@@ -2130,6 +2129,24 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
return INVALID_HW_RING_ID;
}
+#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
+
+#define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
+
+#define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
+ ((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
+
+#define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
+
static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
{
u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
@@ -2145,6 +2162,40 @@ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
+ u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
+ char *threshold_type;
+ char *dir_str;
+
+ switch (type) {
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
+ threshold_type = "warning";
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
+ threshold_type = "critical";
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
+ threshold_type = "fatal";
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
+ threshold_type = "shutdown";
+ break;
+ default:
+ netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
+ return;
+ }
+ if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1))
+ dir_str = "above";
+ else
+ dir_str = "below";
+ netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
+ dir_str, threshold_type);
+ netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
+ BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
+ BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
+ bnxt_hwmon_notify_event(bp, type);
+ break;
+ }
default:
netdev_err(bp->dev, "FW reported unknown error type %u\n",
err_type);
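Editor's note: the new thermal branch decodes everything from the two 32-bit event words with mask/shift macros: data2 carries the current and threshold temperatures in its low two bytes, data1 carries the threshold type and the transition-direction bit. A standalone sketch of the same decoding style, with a hypothetical payload:

#include <stdint.h>
#include <stdio.h>

#define CURR_TEMP_MASK   0x00ffu	/* data2[7:0]  */
#define THRESH_TEMP_MASK 0xff00u	/* data2[15:8] */
#define THRESH_TEMP_SFT  8

int main(void)
{
	uint32_t data2 = 0x5f4a;	/* hypothetical event payload */

	printf("current=%u threshold=%u\n",
	       data2 & CURR_TEMP_MASK,
	       (data2 & THRESH_TEMP_MASK) >> THRESH_TEMP_SFT);
	return 0;	/* prints: current=74 threshold=95 */
}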
@@ -10250,79 +10301,6 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
} while (handle && handle != 0xffff);
}
-#ifdef CONFIG_BNXT_HWMON
-static ssize_t bnxt_show_temp(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct hwrm_temp_monitor_query_output *resp;
- struct hwrm_temp_monitor_query_input *req;
- struct bnxt *bp = dev_get_drvdata(dev);
- u32 len = 0;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
- if (rc)
- return rc;
- resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
- if (!rc)
- len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
- hwrm_req_drop(bp, req);
- if (rc)
- return rc;
- return len;
-}
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
-
-static struct attribute *bnxt_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(bnxt);
-
-static void bnxt_hwmon_close(struct bnxt *bp)
-{
- if (bp->hwmon_dev) {
- hwmon_device_unregister(bp->hwmon_dev);
- bp->hwmon_dev = NULL;
- }
-}
-
-static void bnxt_hwmon_open(struct bnxt *bp)
-{
- struct hwrm_temp_monitor_query_input *req;
- struct pci_dev *pdev = bp->pdev;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
- if (!rc)
- rc = hwrm_req_send_silent(bp, req);
- if (rc == -EACCES || rc == -EOPNOTSUPP) {
- bnxt_hwmon_close(bp);
- return;
- }
-
- if (bp->hwmon_dev)
- return;
-
- bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
- DRV_MODULE_NAME, bp,
- bnxt_groups);
- if (IS_ERR(bp->hwmon_dev)) {
- bp->hwmon_dev = NULL;
- dev_warn(&pdev->dev, "Cannot register hwmon device\n");
- }
-}
-#else
-static void bnxt_hwmon_close(struct bnxt *bp)
-{
-}
-
-static void bnxt_hwmon_open(struct bnxt *bp)
-{
-}
-#endif
-
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -10651,7 +10629,6 @@ static int bnxt_open(struct net_device *dev)
bnxt_reenable_sriov(bp);
}
}
- bnxt_hwmon_open(bp);
}
return rc;
@@ -10736,7 +10713,6 @@ static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
bnxt_hwrm_if_change(bp, false);
@@ -12237,6 +12213,20 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
+/* FW that pre-reserves 1 VNIC per function */
+static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
+{
+ u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
+ return true;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
+ return true;
+ return false;
+}
+
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -12293,6 +12283,9 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (rc)
return -ENODEV;
+ if (bnxt_fw_pre_resv_vnics(bp))
+ bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
+
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
@@ -12300,6 +12293,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (bp->fw_cap & BNXT_FW_CAP_PTP)
__bnxt_hwrm_ptp_qcfg(bp);
bnxt_dcb_init(bp);
+ bnxt_hwmon_init(bp);
return 0;
}
@@ -13205,6 +13199,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_hwmon_uninit(bp);
bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
kfree(bp->ptp_cfg);
@@ -13801,6 +13796,7 @@ init_err_dl:
init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_hwmon_uninit(bp);
bnxt_ethtool_free(bp);
bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 84cbcfa61bc1..9ce0193798d4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2013,6 +2013,9 @@ struct bnxt {
#define BNXT_FW_CAP_RING_MONITOR BIT_ULL(30)
#define BNXT_FW_CAP_DBG_QCAPS BIT_ULL(31)
#define BNXT_FW_CAP_PTP BIT_ULL(32)
+ #define BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED BIT_ULL(33)
+ #define BNXT_FW_CAP_DFLT_VLAN_TPID_PCP BIT_ULL(34)
+ #define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
u32 fw_dbg_cap;
@@ -2053,6 +2056,7 @@ struct bnxt {
#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48)
+#define BNXT_FW_BLD(bp) (((bp)->fw_ver_code >> 16) & 0xffff)
u16 vxlan_fw_dst_port_id;
u16 nge_fw_dst_port_id;
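Editor's note: BNXT_FW_BLD mirrors BNXT_FW_MAJ: the 64-bit version code packs maj.min.bld.rsv into four 16-bit fields, so extracting a component is one shift plus one mask. A quick worked example with a hypothetical version 218.0.18.0:

#include <stdint.h>
#include <stdio.h>

#define FW_VER_CODE(maj, min, bld, rsv) \
	((uint64_t)(maj) << 48 | (uint64_t)(min) << 32 | \
	 (uint64_t)(bld) << 16 | (rsv))

int main(void)
{
	uint64_t code = FW_VER_CODE(218, 0, 18, 0);

	printf("maj=%llu bld=%llu\n",
	       (unsigned long long)(code >> 48),
	       (unsigned long long)((code >> 16) & 0xffff));
	return 0;	/* prints: maj=218 bld=18 */
}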
@@ -2185,7 +2189,13 @@ struct bnxt {
struct bnxt_tc_info *tc_info;
struct list_head tc_indr_block_list;
struct dentry *debugfs_pdev;
+#ifdef CONFIG_BNXT_HWMON
struct device *hwmon_dev;
+ u8 warn_thresh_temp;
+ u8 crit_thresh_temp;
+ u8 fatal_thresh_temp;
+ u8 shutdown_thresh_temp;
+#endif
enum board_idx board_idx;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 3ae8e8af8ab3..d5fad5a3cdd1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2022 Broadcom Inc.
+ * Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -191,6 +191,11 @@ struct cmd_nums {
#define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
#define HWRM_QUEUE_GLOBAL_CFG 0x86UL
#define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
+ #define HWRM_QUEUE_QCAPS 0x8cUL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -315,6 +320,7 @@ struct cmd_nums {
#define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
#define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
#define HWRM_CFA_TLS_FILTER_FREE 0x129UL
+ #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -383,6 +389,9 @@ struct cmd_nums {
#define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
#define HWRM_FUNC_SYNCE_CFG 0x1abUL
#define HWRM_FUNC_SYNCE_QCFG 0x1acUL
+ #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
+ #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
+ #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -408,10 +417,10 @@ struct cmd_nums {
#define HWRM_MFG_SELFTEST_QLIST 0x216UL
#define HWRM_MFG_SELFTEST_EXEC 0x217UL
#define HWRM_STAT_GENERIC_QSTATS 0x218UL
+ #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
- #define HWRM_TF_SESSION_ATTACH 0x2c7UL
#define HWRM_TF_SESSION_REGISTER 0x2c8UL
#define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
#define HWRM_TF_SESSION_CLOSE 0x2caUL
@@ -426,14 +435,6 @@ struct cmd_nums {
#define HWRM_TF_TBL_TYPE_GET 0x2daUL
#define HWRM_TF_TBL_TYPE_SET 0x2dbUL
#define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
- #define HWRM_TF_CTXT_MEM_ALLOC 0x2e2UL
- #define HWRM_TF_CTXT_MEM_FREE 0x2e3UL
- #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL
- #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL
- #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL
- #define HWRM_TF_EXT_EM_OP 0x2e7UL
- #define HWRM_TF_EXT_EM_CFG 0x2e8UL
- #define HWRM_TF_EXT_EM_QCFG 0x2e9UL
#define HWRM_TF_EM_INSERT 0x2eaUL
#define HWRM_TF_EM_DELETE 0x2ebUL
#define HWRM_TF_EM_HASH_INSERT 0x2ecUL
@@ -465,6 +466,14 @@ struct cmd_nums {
#define HWRM_TFC_IDX_TBL_GET 0x390UL
#define HWRM_TFC_IDX_TBL_FREE 0x391UL
#define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
+ #define HWRM_TFC_TCAM_SET 0x393UL
+ #define HWRM_TFC_TCAM_GET 0x394UL
+ #define HWRM_TFC_TCAM_ALLOC 0x395UL
+ #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
+ #define HWRM_TFC_TCAM_FREE 0x397UL
+ #define HWRM_TFC_IF_TBL_SET 0x398UL
+ #define HWRM_TFC_IF_TBL_GET 0x399UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -494,6 +503,8 @@ struct cmd_nums {
#define HWRM_DBG_USEQ_RUN 0xff29UL
#define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
#define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
+ #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
#define HWRM_NVM_DEFRAG 0xffecUL
#define HWRM_NVM_REQ_ARBITRATION 0xffedUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
@@ -540,6 +551,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_BUSY 0x10UL
#define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
#define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
+ #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -571,8 +583,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 118
-#define HWRM_VERSION_STR "1.10.2.118"
+#define HWRM_VERSION_RSVD 171
+#define HWRM_VERSION_STR "1.10.2.171"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -761,51 +773,53 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x49UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
u8 opaque_v;
#define ASYNC_EVENT_CMPL_V 0x1UL
@@ -1011,6 +1025,7 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
};
/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
@@ -1402,6 +1417,45 @@ struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
};
+/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_thermal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
+};
+
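
A minimal sketch of how a consumer might decode the new thermal event using only the masks defined above; the function and variable names are hypothetical, not part of the patch:

	static void decode_thermal_event(__le32 event_data1, __le32 event_data2)
	{
		u32 d1 = le32_to_cpu(event_data1);
		u32 d2 = le32_to_cpu(event_data2);
		/* THRESHOLD_TYPE_* values are pre-shifted, so the masked value
		 * compares directly against the macros.
		 */
		u32 type = d1 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK;
		bool rising = d1 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR;
		u8 cur = d2 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK;
		u8 thr = (d2 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>
			 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT;

		if (type == ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL && rising)
			; /* e.g. raise a critical alarm; cur/thr are in degrees C */
	}
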
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1502,7 +1556,7 @@ struct hwrm_func_vf_free_output {
u8 valid;
};
-/* hwrm_func_vf_cfg_input (size:448b/56B) */
+/* hwrm_func_vf_cfg_input (size:576b/72B) */
struct hwrm_func_vf_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1510,20 +1564,22 @@ struct hwrm_func_vf_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS 0x1000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS 0x2000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
__le16 mtu;
__le16 guest_vlan;
__le16 async_event_cr;
@@ -1547,8 +1603,12 @@ struct hwrm_func_vf_cfg_input {
__le16 num_vnics;
__le16 num_stat_ctxs;
__le16 num_hw_ring_grps;
- __le16 num_tx_key_ctxs;
- __le16 num_rx_key_ctxs;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le16 num_msix;
+ u8 unused[2];
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
};
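
The kTLS and QUIC key-context counts are now independent 32-bit fields, each gated by its own enables bit. A hedged fragment of how a caller might fill them (n_tx and n_rx are hypothetical; req is an already-prepared hwrm_func_vf_cfg_input):

	req->enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS |
				    FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS);
	req->num_ktls_tx_key_ctxs = cpu_to_le32(n_tx);	/* was __le16 num_tx_key_ctxs */
	req->num_ktls_rx_key_ctxs = cpu_to_le32(n_rx);	/* was __le16 num_rx_key_ctxs */
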
/* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -1572,7 +1632,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:768b/96B) */
+/* hwrm_func_qcaps_output (size:896b/112B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1686,6 +1746,11 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1695,7 +1760,15 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ u8 key_xid_partition_cap;
+ #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_TKC 0x1UL
+ #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_RKC 0x2UL
+ #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_TKC 0x4UL
+ #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_RKC 0x8UL
u8 unused_1;
+ u8 device_serial_number[8];
+ __le16 ctxs_per_partition;
+ u8 unused_2[5];
u8 valid;
};
@@ -1710,7 +1783,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:896b/112B) */
+/* hwrm_func_qcfg_output (size:1024b/128B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1870,19 +1943,24 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__le16 host_mtu;
- __le16 alloc_tx_key_ctxs;
- __le16 alloc_rx_key_ctxs;
+ u8 unused_3[2];
+ u8 unused_4[2];
u8 port_kdnet_mode;
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
u8 kdnet_pcie_function;
__le16 port_kdnet_fid;
- u8 unused_3;
+ u8 unused_5[2];
+ __le32 alloc_tx_key_ctxs;
+ __le32 alloc_rx_key_ctxs;
+ u8 lag_id;
+ u8 parif;
+ u8 unused_6[5];
u8 valid;
};
-/* hwrm_func_cfg_input (size:960b/120B) */
+/* hwrm_func_cfg_input (size:1088b/136B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2061,8 +2139,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__be16 tpid;
__le16 host_mtu;
- __le16 num_tx_key_ctxs;
- __le16 num_rx_key_ctxs;
+ u8 unused_0[4];
__le32 enables2;
#define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
#define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
@@ -2083,7 +2160,12 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
#define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
#define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
- u8 unused_0[6];
+ u8 unused_1[2];
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 unused_2;
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -2390,7 +2472,11 @@ struct hwrm_func_drv_qver_input {
__le64 resp_addr;
__le32 reserved;
__le16 fid;
- u8 unused_0[2];
+ u8 driver_type;
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
+ u8 unused_0;
};
/* hwrm_func_drv_qver_output (size:256b/32B) */
@@ -2435,7 +2521,7 @@ struct hwrm_func_resource_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_resource_qcaps_output (size:512b/64B) */
+/* hwrm_func_resource_qcaps_output (size:704b/88B) */
struct hwrm_func_resource_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -2467,15 +2553,20 @@ struct hwrm_func_resource_qcaps_output {
__le16 max_tx_scheduler_inputs;
__le16 flags;
#define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_tx_key_ctxs;
- __le16 max_tx_key_ctxs;
- __le16 min_rx_key_ctxs;
- __le16 max_rx_key_ctxs;
- u8 unused_0[5];
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+ u8 unused_0[3];
u8 valid;
};
-/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */
+/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
struct hwrm_func_vf_resource_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2502,14 +2593,18 @@ struct hwrm_func_vf_resource_cfg_input {
__le16 max_hw_ring_grps;
__le16 flags;
#define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_tx_key_ctxs;
- __le16 max_tx_key_ctxs;
- __le16 min_rx_key_ctxs;
- __le16 max_rx_key_ctxs;
- u8 unused_0[2];
-};
-
-/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:320b/40B) */
struct hwrm_func_vf_resource_cfg_output {
__le16 error_code;
__le16 req_type;
@@ -2523,9 +2618,9 @@ struct hwrm_func_vf_resource_cfg_output {
__le16 reserved_vnics;
__le16 reserved_stat_ctx;
__le16 reserved_hw_ring_grps;
- __le16 reserved_tx_key_ctxs;
- __le16 reserved_rx_key_ctxs;
- u8 unused_0[3];
+ __le32 reserved_tx_key_ctxs;
+ __le32 reserved_rx_key_ctxs;
+ u8 unused_0[7];
u8 valid;
};
@@ -2592,7 +2687,8 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 rkc_entry_size;
__le32 tkc_max_entries;
__le32 rkc_max_entries;
- u8 rsvd1[7];
+ __le16 fast_qpmd_qp_num_entries;
+ u8 rsvd1[5];
u8 valid;
};
@@ -2630,27 +2726,28 @@ struct hwrm_func_backing_store_cfg_input {
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
__le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
u8 qpc_pg_size_qpc_lvl;
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
@@ -3047,7 +3144,7 @@ struct hwrm_func_backing_store_cfg_input {
#define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
#define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
#define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
- u8 rsvd[2];
+ __le16 qp_num_fast_qpmd_entries;
};
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -3477,6 +3574,8 @@ struct hwrm_func_backing_store_cfg_v2_input {
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3546,6 +3645,8 @@ struct hwrm_func_backing_store_qcfg_v2_input {
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3559,22 +3660,24 @@ struct hwrm_func_backing_store_qcfg_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
__le16 instance;
__le32 flags;
__le64 page_dir;
@@ -3609,7 +3712,8 @@ struct hwrm_func_backing_store_qcfg_v2_output {
struct qpc_split_entries {
__le32 qp_num_l2_entries;
__le32 qp_num_qp1_entries;
- __le32 rsvd[2];
+ __le32 qp_num_fast_qpmd_entries;
+ __le32 rsvd;
};
/* srq_split_entries (size:128b/16B) */
@@ -3666,6 +3770,8 @@ struct hwrm_func_backing_store_qcaps_v2_input {
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
@@ -3696,13 +3802,16 @@ struct hwrm_func_backing_store_qcaps_v2_output {
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
__le32 flags;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
__le32 instance_bit_map;
u8 ctx_init_value;
u8 ctx_init_offset;
@@ -3712,7 +3821,13 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le32 min_num_entries;
__le16 next_valid_type;
u8 subtype_valid_cnt;
- u8 rsvd2;
+ u8 exact_cnt_bit_map;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
__le32 split_entry_0;
__le32 split_entry_1;
__le32 split_entry_2;
@@ -4599,7 +4714,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:3776b/472B) */
+/* rx_port_stats_ext (size:3904b/488B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -4660,6 +4775,8 @@ struct rx_port_stats_ext {
__le64 rx_discard_packets_cos7;
__le64 rx_fec_corrected_blocks;
__le64 rx_fec_uncorrectable_blocks;
+ __le64 rx_filter_miss;
+ __le64 rx_fec_symbol_err;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -6092,6 +6209,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -6181,12 +6299,16 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
};
-/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
+/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
struct hwrm_vnic_tpa_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -6208,6 +6330,7 @@ struct hwrm_vnic_tpa_cfg_input {
#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
__le16 vnic_id;
__le16 max_agg_segs;
#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
@@ -6227,6 +6350,25 @@ struct hwrm_vnic_tpa_cfg_input {
u8 unused_0[2];
__le32 max_agg_timer;
__le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_1[4];
};
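
When VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN is set, the new tnl_tpa_en_bitmap selects which tunnel encapsulations are eligible for TPA aggregation. A minimal fragment, assuming req points at a prepared hwrm_vnic_tpa_cfg_input:

	req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
	req->tnl_tpa_en_bitmap =
		cpu_to_le32(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN |
			    VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE);
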
/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
@@ -6282,7 +6424,25 @@ struct hwrm_vnic_tpa_qcfg_output {
#define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
__le32 max_agg_timer;
__le32 min_agg_len;
- u8 unused_0[7];
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_0[3];
u8 valid;
};
@@ -6317,8 +6477,9 @@ struct hwrm_vnic_rss_cfg_input {
__le64 hash_key_tbl_addr;
__le16 rss_ctx_idx;
u8 flags;
- #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
- #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
u8 ring_select_mode;
#define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
#define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
@@ -6480,14 +6641,15 @@ struct hwrm_ring_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
- #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
- #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
- #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
- #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -6541,7 +6703,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- __le16 unused_3;
+ __le16 steering_tag;
__le32 reserved3;
__le32 stat_ctx_id;
__le32 reserved4;
@@ -6917,6 +7079,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_4;
@@ -7099,6 +7262,7 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 tunnel_flags;
@@ -7233,7 +7397,8 @@ struct hwrm_cfa_encap_record_alloc_input {
#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE
u8 unused_0[3];
__le32 encap_data[20];
};
@@ -7338,6 +7503,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 pri_hint;
@@ -7485,6 +7651,7 @@ struct hwrm_cfa_decap_filter_alloc_input {
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_0;
@@ -7628,6 +7795,7 @@ struct hwrm_cfa_flow_alloc_input {
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
};
@@ -8053,8 +8221,11 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI
- u8 unused_0[7];
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE
+ u8 tunnel_next_proto;
+ u8 unused_0[6];
};
/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
@@ -8094,10 +8265,12 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI
- u8 unused_0;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE
+ u8 tunnel_next_proto;
__be16 tunnel_dst_port_val;
- u8 unused_1[4];
+ u8 unused_0[4];
};
/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
@@ -8141,10 +8314,12 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI
- u8 unused_0;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE
+ u8 tunnel_next_proto;
__le16 tunnel_dst_port_id;
- u8 unused_1[4];
+ u8 unused_0[4];
};
/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
@@ -8212,7 +8387,7 @@ struct ctx_hw_stats_ext {
__le64 rx_tpa_events;
};
-/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
+/* hwrm_stat_ctx_alloc_input (size:320b/40B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8225,6 +8400,10 @@ struct hwrm_stat_ctx_alloc_input {
#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
u8 unused_0;
__le16 stats_dma_length;
+ __le16 flags;
+ #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
+ __le16 steering_tag;
+ __le32 unused_1;
};
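
The stats-context allocation request grows a flags/steering_tag pair; presumably the tag is only consumed when the valid flag is set. A hedged fragment (tag is hypothetical):

	req->flags = cpu_to_le16(STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID);
	req->steering_tag = cpu_to_le16(tag);
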
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -8432,7 +8611,7 @@ struct hwrm_stat_generic_qstats_output {
u8 valid;
};
-/* generic_sw_hw_stats (size:1216b/152B) */
+/* generic_sw_hw_stats (size:1408b/176B) */
struct generic_sw_hw_stats {
__le64 pcie_statistics_tx_tlp;
__le64 pcie_statistics_rx_tlp;
@@ -8453,6 +8632,9 @@ struct generic_sw_hw_stats {
__le64 cache_miss_count_cfcs;
__le64 cache_miss_count_cfcc;
__le64 cache_miss_count_cfcm;
+ __le64 hw_db_recov_dbs_dropped;
+ __le64 hw_db_recov_drops_serviced;
+ __le64 hw_db_recov_dbs_recovered;
};
/* hwrm_fw_reset_input (size:192b/24B) */
@@ -8876,7 +9058,7 @@ struct hwrm_temp_monitor_query_input {
__le64 resp_addr;
};
-/* hwrm_temp_monitor_query_output (size:128b/16B) */
+/* hwrm_temp_monitor_query_output (size:192b/24B) */
struct hwrm_temp_monitor_query_output {
__le16 error_code;
__le16 req_type;
@@ -8886,14 +9068,20 @@ struct hwrm_temp_monitor_query_output {
u8 phy_temp;
u8 om_temp;
u8 flags;
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
u8 temp2;
u8 phy_temp2;
u8 om_temp2;
+ u8 warn_threshold;
+ u8 critical_threshold;
+ u8 fatal_threshold;
+ u8 shutdown_threshold;
+ u8 unused_0[4];
u8 valid;
};
@@ -9317,7 +9505,8 @@ struct hwrm_dbg_ring_info_get_output {
__le32 producer_index;
__le32 consumer_index;
__le32 cag_vector_ctrl;
- u8 unused_0[3];
+ __le16 st_tag;
+ u8 unused_0;
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
new file mode 100644
index 000000000000..e48094043c3b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
@@ -0,0 +1,241 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2023 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/pci.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_hwmon.h"
+
+void bnxt_hwmon_notify_event(struct bnxt *bp, u32 type)
+{
+ u32 attr;
+
+ if (!bp->hwmon_dev)
+ return;
+
+ switch (type) {
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
+ attr = hwmon_temp_max_alarm;
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
+ attr = hwmon_temp_crit_alarm;
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
+ attr = hwmon_temp_emergency_alarm;
+ break;
+ default:
+ return;
+ }
+
+ hwmon_notify_event(&bp->pdev->dev, hwmon_temp, attr, 0);
+}
+
+static int bnxt_hwrm_temp_query(struct bnxt *bp, u8 *temp)
+{
+ struct hwrm_temp_monitor_query_output *resp;
+ struct hwrm_temp_monitor_query_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
+ if (rc)
+ return rc;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (rc)
+ goto drop_req;
+
+ if (temp) {
+ *temp = resp->temp;
+ } else if (resp->flags &
+ TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE) {
+ bp->fw_cap |= BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED;
+ bp->warn_thresh_temp = resp->warn_threshold;
+ bp->crit_thresh_temp = resp->critical_threshold;
+ bp->fatal_thresh_temp = resp->fatal_threshold;
+ bp->shutdown_thresh_temp = resp->shutdown_threshold;
+ }
+drop_req:
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static umode_t bnxt_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct bnxt *bp = _data;
+
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_emergency:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_emergency_alarm:
+ if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED))
+ return 0;
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int bnxt_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u8 temp = 0;
+ int rc;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp * 1000;
+ return rc;
+ case hwmon_temp_max:
+ *val = bp->warn_thresh_temp * 1000;
+ return 0;
+ case hwmon_temp_crit:
+ *val = bp->crit_thresh_temp * 1000;
+ return 0;
+ case hwmon_temp_emergency:
+ *val = bp->fatal_thresh_temp * 1000;
+ return 0;
+ case hwmon_temp_max_alarm:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp >= bp->warn_thresh_temp;
+ return rc;
+ case hwmon_temp_crit_alarm:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp >= bp->crit_thresh_temp;
+ return rc;
+ case hwmon_temp_emergency_alarm:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp >= bp->fatal_thresh_temp;
+ return rc;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *bnxt_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_EMERGENCY | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops bnxt_hwmon_ops = {
+ .is_visible = bnxt_hwmon_is_visible,
+ .read = bnxt_hwmon_read,
+};
+
+static const struct hwmon_chip_info bnxt_hwmon_chip_info = {
+ .ops = &bnxt_hwmon_ops,
+ .info = bnxt_hwmon_info,
+};
+
+static ssize_t temp1_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnxt *bp = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", bp->shutdown_thresh_temp * 1000);
+}
+
+static ssize_t temp1_shutdown_alarm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u8 temp;
+ int rc;
+
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (rc)
+ return -EIO;
+
+ return sysfs_emit(buf, "%u\n", temp >= bp->shutdown_thresh_temp);
+}
+
+static DEVICE_ATTR_RO(temp1_shutdown);
+static DEVICE_ATTR_RO(temp1_shutdown_alarm);
+
+static struct attribute *bnxt_temp_extra_attrs[] = {
+ &dev_attr_temp1_shutdown.attr,
+ &dev_attr_temp1_shutdown_alarm.attr,
+ NULL,
+};
+
+static umode_t bnxt_temp_extra_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct bnxt *bp = dev_get_drvdata(dev);
+
+ /* Shutdown temperature setting in NVM is optional */
+ if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED) ||
+ !bp->shutdown_thresh_temp)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group bnxt_temp_extra_group = {
+ .attrs = bnxt_temp_extra_attrs,
+ .is_visible = bnxt_temp_extra_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(bnxt_temp_extra);
+
+void bnxt_hwmon_uninit(struct bnxt *bp)
+{
+ if (bp->hwmon_dev) {
+ hwmon_device_unregister(bp->hwmon_dev);
+ bp->hwmon_dev = NULL;
+ }
+}
+
+void bnxt_hwmon_init(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int rc;
+
+	/* temp1_xxx is the only sensor; don't register the hwmon device if the query will fail */
+ rc = bnxt_hwrm_temp_query(bp, NULL);
+ if (rc == -EACCES || rc == -EOPNOTSUPP) {
+ bnxt_hwmon_uninit(bp);
+ return;
+ }
+
+ if (bp->hwmon_dev)
+ return;
+
+ bp->hwmon_dev = hwmon_device_register_with_info(&pdev->dev,
+ DRV_MODULE_NAME, bp,
+ &bnxt_hwmon_chip_info,
+ bnxt_temp_extra_groups);
+ if (IS_ERR(bp->hwmon_dev)) {
+ bp->hwmon_dev = NULL;
+ dev_warn(&pdev->dev, "Cannot register hwmon device\n");
+ }
+}
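
bnxt_hwrm_temp_query() above follows the driver's standard HWRM request lifecycle: hwrm_req_init() allocates and prepares the request, hwrm_req_hold() keeps the response buffer valid across the send, and hwrm_req_drop() releases both. A minimal sketch of the pattern (HWRM_SOME_CMD and some_field are placeholders, not real definitions):

	struct hwrm_some_cmd_output *resp;
	struct hwrm_some_cmd_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_SOME_CMD);
	if (rc)
		return rc;
	resp = hwrm_req_hold(bp, req);	/* resp stays valid after the send */
	rc = hwrm_req_send(bp, req);	/* _silent variant suppresses error logging */
	if (!rc)
		val = le16_to_cpu(resp->some_field);
	hwrm_req_drop(bp, req);		/* releases both req and resp */
	return rc;
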
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h
new file mode 100644
index 000000000000..76d9f599ebc0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h
@@ -0,0 +1,30 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2023 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HWMON_H
+#define BNXT_HWMON_H
+
+#ifdef CONFIG_BNXT_HWMON
+void bnxt_hwmon_notify_event(struct bnxt *bp, u32 type);
+void bnxt_hwmon_uninit(struct bnxt *bp);
+void bnxt_hwmon_init(struct bnxt *bp);
+#else
+static inline void bnxt_hwmon_notify_event(struct bnxt *bp, u32 type)
+{
+}
+
+static inline void bnxt_hwmon_uninit(struct bnxt *bp)
+{
+}
+
+static inline void bnxt_hwmon_init(struct bnxt *bp)
+{
+}
+#endif
+#endif
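
With the chip info and the extra attribute group above, a successful registration should expose the standard hwmon nodes plus the two custom ones, roughly:

	/* under /sys/class/hwmon/hwmonN/ (names per hwmon core conventions):
	 *   temp1_input, temp1_max, temp1_crit, temp1_emergency,
	 *   temp1_max_alarm, temp1_crit_alarm, temp1_emergency_alarm,
	 *   temp1_shutdown, temp1_shutdown_alarm  (extra group, visible only
	 *   when the firmware reports threshold support)
	 */
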
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index dde327f2c57e..1f925d247244 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -550,7 +550,6 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
- vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
@@ -572,11 +571,20 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
vf_rx_rings /= num_vfs;
- vf_vnics /= num_vfs;
+ if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) &&
+ vf_vnics >= pf->max_vfs) {
+			/* Take into account that the FW has pre-reserved
+			 * one VNIC for each of the pf->max_vfs VFs.
+			 */
+ vf_vnics = (vf_vnics - pf->max_vfs + num_vfs) / num_vfs;
+ } else {
+ vf_vnics /= num_vfs;
+ }
vf_stat_ctx /= num_vfs;
vf_ring_grps /= num_vfs;
vf_rss /= num_vfs;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req->min_tx_rings = cpu_to_le16(vf_tx_rings);
req->min_rx_rings = cpu_to_le16(vf_rx_rings);
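
A worked example with hypothetical numbers: if vf_vnics is 72 after subtracting the PF's VNICs, pf->max_vfs is 64 and num_vfs is 8, the old code gave each VF 72 / 8 = 9 VNICs; the new branch computes (72 - 64 + 8) / 8 = 2, i.e. each enabled VF keeps its one pre-reserved VNIC plus an equal share of the (vf_vnics - max_vfs) = 8 non-reserved ones.
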
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 24bade875ca6..91f3a7e78d65 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -4164,7 +4164,7 @@ err:
return err;
}
-static int bcmgenet_remove(struct platform_device *pdev)
+static void bcmgenet_remove(struct platform_device *pdev)
{
struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
@@ -4172,8 +4172,6 @@ static int bcmgenet_remove(struct platform_device *pdev)
unregister_netdev(priv->dev);
bcmgenet_mii_exit(priv->dev);
free_netdev(priv->dev);
-
- return 0;
}
static void bcmgenet_shutdown(struct platform_device *pdev)
@@ -4352,7 +4350,7 @@ MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
- .remove = bcmgenet_remove,
+ .remove_new = bcmgenet_remove,
.shutdown = bcmgenet_shutdown,
.driver = {
.name = "bcmgenet",
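
This and the conversions that follow are the tree-wide move from the int-returning .remove() platform callback to the void .remove_new(): the platform core removes the device regardless of the return value, so the error paths only ever dropped a dead "return 0". The shape of the change, with hypothetical foo_* names:

	static void foo_remove(struct platform_device *pdev)	/* was: static int */
	{
		/* teardown as before, just without the trailing "return 0" */
	}

	static struct platform_driver foo_driver = {
		.probe      = foo_probe,
		.remove_new = foo_remove,
	};
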
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 3a6763c5e8b3..fcf8485f3446 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2593,7 +2593,7 @@ out_out:
return err;
}
-static int sbmac_remove(struct platform_device *pldev)
+static void sbmac_remove(struct platform_device *pldev)
{
struct net_device *dev = platform_get_drvdata(pldev);
struct sbmac_softc *sc = netdev_priv(dev);
@@ -2604,13 +2604,11 @@ static int sbmac_remove(struct platform_device *pldev)
mdiobus_free(sc->mii_bus);
iounmap(sc->sbm_base);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
- .remove = sbmac_remove,
+ .remove_new = sbmac_remove,
.driver = {
.name = sbmac_string,
},
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b940dcd3ace6..cebae0f418f2 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5156,7 +5156,7 @@ err_disable_clocks:
return err;
}
-static int macb_remove(struct platform_device *pdev)
+static void macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
@@ -5181,8 +5181,6 @@ static int macb_remove(struct platform_device *pdev)
phylink_destroy(bp->phylink);
free_netdev(dev);
}
-
- return 0;
}
static int __maybe_unused macb_suspend(struct device *dev)
@@ -5398,7 +5396,7 @@ static const struct dev_pm_ops macb_pm_ops = {
static struct platform_driver macb_driver = {
.probe = macb_probe,
- .remove = macb_remove,
+ .remove_new = macb_remove,
.driver = {
.name = "macb",
.of_match_table = of_match_ptr(macb_dt_ids),
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index f4f87dfa9687..5e97f1e4e38e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1820,7 +1820,7 @@ err_alloc:
* changes the link status, releases the DMA descriptor rings,
* unregisters the MDIO bus and unmaps the allocated memory.
*/
-static int xgmac_remove(struct platform_device *pdev)
+static void xgmac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct xgmac_priv *priv = netdev_priv(ndev);
@@ -1840,8 +1840,6 @@ static int xgmac_remove(struct platform_device *pdev)
release_mem_region(res->start, resource_size(res));
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1921,7 +1919,7 @@ static struct platform_driver xgmac_driver = {
.pm = &xgmac_pm_ops,
},
.probe = xgmac_probe,
- .remove = xgmac_remove,
+ .remove_new = xgmac_remove,
};
module_platform_driver(xgmac_driver);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index edde0b8fa49c..007d4b06819e 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1521,7 +1521,7 @@ err:
return result;
}
-static int octeon_mgmt_remove(struct platform_device *pdev)
+static void octeon_mgmt_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct octeon_mgmt *p = netdev_priv(netdev);
@@ -1529,7 +1529,6 @@ static int octeon_mgmt_remove(struct platform_device *pdev)
unregister_netdev(netdev);
of_node_put(p->phy_np);
free_netdev(netdev);
- return 0;
}
static const struct of_device_id octeon_mgmt_match[] = {
@@ -1546,7 +1545,7 @@ static struct platform_driver octeon_mgmt_driver = {
.of_match_table = octeon_mgmt_match,
},
.probe = octeon_mgmt_probe,
- .remove = octeon_mgmt_remove,
+ .remove_new = octeon_mgmt_remove,
};
module_platform_driver(octeon_mgmt_driver);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index ea75f275023f..646ca0bc25bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -76,7 +76,7 @@ struct l2t_data {
atomic_t nfree; /* number of free entries */
rwlock_t lock;
struct rcu_head rcu_head; /* to handle rcu cleanup */
- struct l2t_entry l2tab[];
+ struct l2t_entry l2tab[] __counted_by(nentries);
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
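
__counted_by(n) ties a flexible array member to the struct field that holds its element count, so fortified builds (CONFIG_FORTIFY_SOURCE, UBSAN bounds checking) can bounds-check accesses at run time. A minimal sketch with a hypothetical struct:

	struct example {
		unsigned int nelem;
		struct item elems[] __counted_by(nelem);
	};

	p = kzalloc(struct_size(p, elems, n), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->nelem = n;	/* the count must be set before elems[] is indexed */
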
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 290c1058069a..847c7fc2bbd9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -29,7 +29,7 @@ struct clip_tbl {
atomic_t nfree;
struct list_head ce_free_head;
void *cl_list;
- struct list_head hash_list[];
+ struct list_head hash_list[] __counted_by(clipt_size);
};
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index f59dd4b2ae6f..9050568a034c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -331,6 +331,6 @@ struct cxgb4_link {
struct cxgb4_tc_u32_table {
unsigned int size; /* number of entries in table */
- struct cxgb4_link table[]; /* Jump table */
+ struct cxgb4_link table[] __counted_by(size); /* Jump table */
};
#endif /* __CXGB4_TC_U32_PARSE_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index a10a6862a9a4..1e5f5b1a22a6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -59,7 +59,7 @@ struct l2t_data {
rwlock_t lock;
atomic_t nfree; /* number of free entries */
struct l2t_entry *rover; /* starting point for next allocation */
- struct l2t_entry l2tab[]; /* MUST BE LAST */
+ struct l2t_entry l2tab[] __counted_by(l2t_size); /* MUST BE LAST */
};
static inline unsigned int vlan_prio(const struct l2t_entry *e)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 5f8b871d79af..6b3c778815f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -82,7 +82,7 @@ struct sched_class {
struct sched_table { /* per port scheduling table */
u8 sched_size;
- struct sched_class tab[];
+ struct sched_class tab[] __counted_by(sched_size);
};
static inline bool can_sched(struct net_device *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h
index 541249d78914..109c1dff563a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h
@@ -66,7 +66,7 @@ struct smt_entry {
struct smt_data {
unsigned int smt_size;
rwlock_t lock;
- struct smt_entry smtab[];
+ struct smt_entry smtab[] __counted_by(smt_size);
};
struct smt_data *t4_init_smt(void);
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index d323c5c23521..0a21a10a791c 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1879,7 +1879,7 @@ free:
return err;
}
-static int cs89x0_platform_remove(struct platform_device *pdev)
+static void cs89x0_platform_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -1889,7 +1889,6 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
*/
unregister_netdev(dev);
free_netdev(dev);
- return 0;
}
static const struct of_device_id __maybe_unused cs89x0_match[] = {
@@ -1904,7 +1903,7 @@ static struct platform_driver cs89x0_driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(cs89x0_match),
},
- .remove = cs89x0_platform_remove,
+ .remove_new = cs89x0_platform_remove,
};
module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 8627ab19d470..1c2a540db13d 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -757,7 +757,7 @@ static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
}
-static int ep93xx_eth_remove(struct platform_device *pdev)
+static void ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct ep93xx_priv *ep;
@@ -765,7 +765,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
dev = platform_get_drvdata(pdev);
if (dev == NULL)
- return 0;
+ return;
ep = netdev_priv(dev);
@@ -782,8 +782,6 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
}
free_netdev(dev);
-
- return 0;
}
static int ep93xx_eth_probe(struct platform_device *pdev)
@@ -862,7 +860,7 @@ err_out:
static struct platform_driver ep93xx_eth_driver = {
.probe = ep93xx_eth_probe,
- .remove = ep93xx_eth_remove,
+ .remove_new = ep93xx_eth_remove,
.driver = {
.name = "ep93xx-eth",
},
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 21a70b1f0ac5..887876f35f10 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -556,19 +556,18 @@ static int set_mac_address(struct net_device *dev, void *addr)
MODULE_LICENSE("GPL");
-static int mac89x0_device_remove(struct platform_device *pdev)
+static void mac89x0_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
nubus_writew(0, dev->base_addr + ADD_PORT);
free_netdev(dev);
- return 0;
}
static struct platform_driver mac89x0_platform_driver = {
.probe = mac89x0_device_probe,
- .remove = mac89x0_device_remove,
+ .remove_new = mac89x0_device_remove,
.driver = {
.name = "mac89x0",
},
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index a8b9d1a3e4d5..5423fe26b4ef 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2518,13 +2518,11 @@ unprepare:
return ret;
}
-static int gemini_ethernet_port_remove(struct platform_device *pdev)
+static void gemini_ethernet_port_remove(struct platform_device *pdev)
{
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
-
- return 0;
}
static const struct of_device_id gemini_ethernet_port_of_match[] = {
@@ -2541,7 +2539,7 @@ static struct platform_driver gemini_ethernet_port_driver = {
.of_match_table = gemini_ethernet_port_of_match,
},
.probe = gemini_ethernet_port_probe,
- .remove = gemini_ethernet_port_remove,
+ .remove_new = gemini_ethernet_port_remove,
};
static int gemini_ethernet_probe(struct platform_device *pdev)
@@ -2583,14 +2581,12 @@ static int gemini_ethernet_probe(struct platform_device *pdev)
return devm_of_platform_populate(dev);
}
-static int gemini_ethernet_remove(struct platform_device *pdev)
+static void gemini_ethernet_remove(struct platform_device *pdev)
{
struct gemini_ethernet *geth = platform_get_drvdata(pdev);
geth_cleanup_freeq(geth);
geth->initialized = false;
-
- return 0;
}
static const struct of_device_id gemini_ethernet_of_match[] = {
@@ -2607,7 +2603,7 @@ static struct platform_driver gemini_ethernet_driver = {
.of_match_table = gemini_ethernet_of_match,
},
.probe = gemini_ethernet_probe,
- .remove = gemini_ethernet_remove,
+ .remove_new = gemini_ethernet_remove,
};
static int __init gemini_ethernet_module_init(void)
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 05a89ab6766c..150cc94ae9f8 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1770,8 +1770,7 @@ static const struct dev_pm_ops dm9000_drv_pm_ops = {
.resume = dm9000_drv_resume,
};
-static int
-dm9000_drv_remove(struct platform_device *pdev)
+static void dm9000_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct board_info *dm = to_dm9000_board(ndev);
@@ -1783,7 +1782,6 @@ dm9000_drv_remove(struct platform_device *pdev)
regulator_disable(dm->power_supply);
dev_dbg(&pdev->dev, "released and freed device\n");
- return 0;
}
#ifdef CONFIG_OF
@@ -1801,7 +1799,7 @@ static struct platform_driver dm9000_driver = {
.of_match_table = of_match_ptr(dm9000_of_matches),
},
.probe = dm9000_probe,
- .remove = dm9000_drv_remove,
+ .remove_new = dm9000_drv_remove,
};
module_platform_driver(dm9000_driver);
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index 0ed598dc7569..bd786dfbc066 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -381,7 +381,7 @@ struct mediatable {
unsigned has_reset:6;
u32 csr15dir;
u32 csr15val; /* 21143 NWay setting. */
- struct medialeaf mleaf[];
+ struct medialeaf mleaf[] __counted_by(leafcount);
};
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 151ca9573be9..2a18df3605f1 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -841,7 +841,7 @@ err_out_free_dev:
return err;
}
-static int dnet_remove(struct platform_device *pdev)
+static void dnet_remove(struct platform_device *pdev)
{
struct net_device *dev;
@@ -859,13 +859,11 @@ static int dnet_remove(struct platform_device *pdev)
free_irq(dev->irq, dev);
free_netdev(dev);
}
-
- return 0;
}
static struct platform_driver dnet_driver = {
.probe = dnet_probe,
- .remove = dnet_remove,
+ .remove_new = dnet_remove,
.driver = {
.name = "dnet",
},
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 8b992dc9bb52..77a3fcb821c8 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -2587,7 +2587,7 @@ mdio_init_failed:
return retval;
}
-static int tsnep_remove(struct platform_device *pdev)
+static void tsnep_remove(struct platform_device *pdev)
{
struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
@@ -2603,8 +2603,6 @@ static int tsnep_remove(struct platform_device *pdev)
mdiobus_unregister(adapter->mdiobus);
tsnep_disable_irq(adapter, ECM_INT_ALL);
-
- return 0;
}
static const struct of_device_id tsnep_of_match[] = {
@@ -2619,7 +2617,7 @@ static struct platform_driver tsnep_driver = {
.of_match_table = tsnep_of_match,
},
.probe = tsnep_probe,
- .remove = tsnep_remove,
+ .remove_new = tsnep_remove,
};
module_platform_driver(tsnep_driver);
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 95cbad198b4b..ad41c9019018 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1254,7 +1254,7 @@ out:
* ethoc_remove - shutdown OpenCores ethernet MAC
* @pdev: platform device
*/
-static int ethoc_remove(struct platform_device *pdev)
+static void ethoc_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct ethoc *priv = netdev_priv(netdev);
@@ -1271,8 +1271,6 @@ static int ethoc_remove(struct platform_device *pdev)
unregister_netdev(netdev);
free_netdev(netdev);
}
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1298,7 +1296,7 @@ MODULE_DEVICE_TABLE(of, ethoc_match);
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
- .remove = ethoc_remove,
+ .remove_new = ethoc_remove,
.suspend = ethoc_suspend,
.resume = ethoc_resume,
.driver = {
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 9135b918dd49..fddfd1dd5070 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -2012,7 +2012,7 @@ err_alloc_etherdev:
return err;
}
-static int ftgmac100_remove(struct platform_device *pdev)
+static void ftgmac100_remove(struct platform_device *pdev)
{
struct net_device *netdev;
struct ftgmac100 *priv;
@@ -2040,7 +2040,6 @@ static int ftgmac100_remove(struct platform_device *pdev)
netif_napi_del(&priv->napi);
free_netdev(netdev);
- return 0;
}
static const struct of_device_id ftgmac100_of_match[] = {
@@ -2051,7 +2050,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
static struct platform_driver ftgmac100_driver = {
.probe = ftgmac100_probe,
- .remove = ftgmac100_remove,
+ .remove_new = ftgmac100_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ftgmac100_of_match,
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 183069581bc0..003bc9a45c65 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1219,7 +1219,7 @@ err_alloc_etherdev:
return err;
}
-static int ftmac100_remove(struct platform_device *pdev)
+static void ftmac100_remove(struct platform_device *pdev)
{
struct net_device *netdev;
struct ftmac100 *priv;
@@ -1234,7 +1234,6 @@ static int ftmac100_remove(struct platform_device *pdev)
netif_napi_del(&priv->napi);
free_netdev(netdev);
- return 0;
}
static const struct of_device_id ftmac100_of_ids[] = {
@@ -1244,7 +1243,7 @@ static const struct of_device_id ftmac100_of_ids[] = {
static struct platform_driver ftmac100_driver = {
.probe = ftmac100_probe,
- .remove = ftmac100_remove,
+ .remove_new = ftmac100_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ftmac100_of_ids
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 35461165de0d..30bec47bc665 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1655,7 +1655,7 @@ out:
rx_ring->stats.bytes += rx_byte_cnt;
if (xdp_redirect_frm_cnt)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
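xdp_do_flush_map() is the old name for xdp_do_flush(); both flush whatever xdp_do_redirect() queued during the poll, regardless of whether the target was a devmap, cpumap, or xskmap, and must run before NAPI completes. A hedged sketch of where the call sits in a generic poll loop (FOO_XDP_REDIR and the foo_* helpers are hypothetical):

#include <linux/filter.h>
#include <linux/netdevice.h>

static int foo_napi_poll(struct napi_struct *napi, int budget)
{
    unsigned int xdp_res = 0;
    int work = foo_clean_rx(napi, budget, &xdp_res);

    /* flush frames redirected by xdp_do_redirect() once per poll */
    if (xdp_res & FOO_XDP_REDIR)
        xdp_do_flush();

    if (work < budget)
        napi_complete_done(napi, work);
    return work;
}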
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 7439739cd81a..a9c2ff22431c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -297,7 +297,7 @@ struct enetc_int_vector {
char name[ENETC_INT_NAME_MAX];
struct enetc_bdr rx_ring;
- struct enetc_bdr tx_ring[];
+ struct enetc_bdr tx_ring[] __counted_by(count_tx_rings);
} ____cacheline_aligned_in_smp;
struct enetc_cls_rule {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 2513b44056c1..b65da49dd926 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -443,7 +443,7 @@ struct enetc_psfp_gate {
u32 num_entries;
refcount_t refcount;
struct hlist_node node;
- struct action_gate_entry entries[];
+ struct action_gate_entry entries[] __counted_by(num_entries);
};
/* Only enable the green color frame now
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 77c8e9cfb445..b83346708881 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1832,7 +1832,7 @@ rx_processing_done:
rxq->bd.cur = bdp;
if (xdp_result & FEC_ENET_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
return pkt_received;
}
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 5704b5f57cd0..83b09dcfafc4 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -190,7 +190,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
priv->rx_cfg.num_queues;
priv->stats_report_len = struct_size(priv->stats_report, stats,
- tx_stats_num + rx_stats_num);
+ size_add(tx_stats_num, rx_stats_num));
priv->stats_report =
dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
&priv->stats_report_bus, GFP_KERNEL);
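tx_stats_num + rx_stats_num was previously evaluated in plain integer arithmetic before struct_size() saw it; size_add() performs the addition in size_t and saturates to SIZE_MAX on overflow, so dma_alloc_coherent() fails cleanly instead of receiving a wrapped-around size. A minimal sketch of the idiom (struct names hypothetical):

#include <linux/overflow.h>

struct stat_entry {
    u64 v;
};

struct report {
    u64 written;
    struct stat_entry stats[];
};

static size_t report_bytes(size_t tx_stats, size_t rx_stats)
{
    struct report *r;

    /* size_add() saturates instead of wrapping; struct_size() checks too */
    return struct_size(r, stats, size_add(tx_stats, rx_stats));
}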
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index ecf92a5d56bb..b91e7a06b97f 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -1021,7 +1021,7 @@ init_fail:
return ret;
}
-static int hip04_remove(struct platform_device *pdev)
+static void hip04_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hip04_priv *priv = netdev_priv(ndev);
@@ -1035,8 +1035,6 @@ static int hip04_remove(struct platform_device *pdev)
of_node_put(priv->phy_node);
cancel_work_sync(&priv->tx_timeout_task);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id hip04_mac_match[] = {
@@ -1048,7 +1046,7 @@ MODULE_DEVICE_TABLE(of, hip04_mac_match);
static struct platform_driver hip04_mac_driver = {
.probe = hip04_mac_probe,
- .remove = hip04_remove,
+ .remove_new = hip04_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = hip04_mac_match,
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index cb7b0293fe85..2406263c9dd3 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -893,7 +893,7 @@ out_free_netdev:
return ret;
}
-static int hisi_femac_drv_remove(struct platform_device *pdev)
+static void hisi_femac_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hisi_femac_priv *priv = netdev_priv(ndev);
@@ -904,8 +904,6 @@ static int hisi_femac_drv_remove(struct platform_device *pdev)
phy_disconnect(ndev->phydev);
clk_disable_unprepare(priv->clk);
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -961,7 +959,7 @@ static struct platform_driver hisi_femac_driver = {
.of_match_table = hisi_femac_match,
},
.probe = hisi_femac_drv_probe,
- .remove = hisi_femac_drv_remove,
+ .remove_new = hisi_femac_drv_remove,
#ifdef CONFIG_PM
.suspend = hisi_femac_drv_suspend,
.resume = hisi_femac_drv_resume,
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 26d22bb04b87..506fa3d8bbee 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1282,7 +1282,7 @@ out_free_netdev:
return ret;
}
-static int hix5hd2_dev_remove(struct platform_device *pdev)
+static void hix5hd2_dev_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hix5hd2_priv *priv = netdev_priv(ndev);
@@ -1298,8 +1298,6 @@ static int hix5hd2_dev_remove(struct platform_device *pdev)
of_node_put(priv->phy_node);
cancel_work_sync(&priv->tx_timeout_task);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id hix5hd2_of_match[] = {
@@ -1319,7 +1317,7 @@ static struct platform_driver hix5hd2_dev_driver = {
.of_match_table = hix5hd2_of_match,
},
.probe = hix5hd2_dev_probe,
- .remove = hix5hd2_dev_remove,
+ .remove_new = hix5hd2_dev_remove,
};
module_platform_driver(hix5hd2_dev_driver);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index fcaf5132b865..1b67da1f6fa8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3007,7 +3007,7 @@ free_dev:
* hns_dsaf_remove - remove dsaf dev
* @pdev: dsaf platform device
*/
-static int hns_dsaf_remove(struct platform_device *pdev)
+static void hns_dsaf_remove(struct platform_device *pdev)
{
struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev);
@@ -3020,8 +3020,6 @@ static int hns_dsaf_remove(struct platform_device *pdev)
hns_dsaf_free(dsaf_dev);
hns_dsaf_free_dev(dsaf_dev);
-
- return 0;
}
static const struct of_device_id g_dsaf_match[] = {
@@ -3033,7 +3031,7 @@ MODULE_DEVICE_TABLE(of, g_dsaf_match);
static struct platform_driver g_dsaf_driver = {
.probe = hns_dsaf_probe,
- .remove = hns_dsaf_remove,
+ .remove_new = hns_dsaf_remove,
.driver = {
.name = DSAF_DRV_NAME,
.of_match_table = g_dsaf_match,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 0f0e16f9afc0..7e00231c1acf 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -92,7 +92,7 @@ struct ppe_common_cb {
u8 comm_index; /*ppe_common index*/
u32 ppe_num;
- struct hns_ppe_cb ppe_cb[];
+ struct hns_ppe_cb ppe_cb[] __counted_by(ppe_num);
};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index a9f805925699..c1e9b6997853 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -108,7 +108,7 @@ struct rcb_common_cb {
u32 ring_num;
u32 desc_num; /* desc num per queue*/
- struct ring_pair_cb ring_pair_cb[];
+ struct ring_pair_cb ring_pair_cb[] __counted_by(ring_num);
};
int hns_rcb_buf_size2type(u32 buf_size);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 7cf10d1e2b31..0900abf5c508 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2384,7 +2384,7 @@ out_read_prop_fail:
return ret;
}
-static int hns_nic_dev_remove(struct platform_device *pdev)
+static void hns_nic_dev_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -2413,7 +2413,6 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
of_node_put(to_of_node(priv->fwnode));
free_netdev(ndev);
- return 0;
}
static const struct of_device_id hns_enet_of_match[] = {
@@ -2431,7 +2430,7 @@ static struct platform_driver hns_nic_dev_driver = {
.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
},
.probe = hns_nic_dev_probe,
- .remove = hns_nic_dev_remove,
+ .remove_new = hns_nic_dev_remove,
};
module_platform_driver(hns_nic_dev_driver);
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 409a89d80220..ed73707176c1 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -610,7 +610,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
*
* Return 0 on success, negative on failure
*/
-static int hns_mdio_remove(struct platform_device *pdev)
+static void hns_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus;
@@ -618,7 +618,6 @@ static int hns_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(bus);
platform_set_drvdata(pdev, NULL);
- return 0;
}
static const struct of_device_id hns_mdio_match[] = {
@@ -636,7 +635,7 @@ MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match);
static struct platform_driver hns_mdio_driver = {
.probe = hns_mdio_probe,
- .remove = hns_mdio_remove,
+ .remove_new = hns_mdio_remove,
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index ad47ac51a139..9b60966736db 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -861,7 +861,7 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
- int err, irqname_len;
+ int err;
txq->netdev = netdev;
txq->sq = sq;
@@ -882,15 +882,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
goto err_alloc_free_sges;
}
- irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
- txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
+ txq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL, "%s_txq%d",
+ netdev->name, qp->q_id);
if (!txq->irq_name) {
err = -ENOMEM;
goto err_alloc_irqname;
}
- sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);
-
err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
CI_UPDATE_NO_COALESC);
if (err)
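The snprintf(NULL, 0, ...) length probe, devm_kzalloc(), and sprintf() triple collapses into a single devm_kasprintf() call, which sizes, allocates (device-managed), and formats at once. A short hedged sketch (the foo_* naming is illustrative):

#include <linux/device.h>
#include <linux/netdevice.h>

static int foo_set_irq_name(struct device *dev, struct net_device *netdev,
                            int q_id, const char **name)
{
    /* one call replaces measure + allocate + format */
    *name = devm_kasprintf(dev, GFP_KERNEL, "%s_txq%d",
                           netdev->name, q_id);
    return *name ? 0 : -ENOMEM;
}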
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 54bb4d9a0d1e..813403c2628f 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -153,7 +153,7 @@ probe_failed_free_mpu:
return retval;
}
-static int sni_82596_driver_remove(struct platform_device *pdev)
+static void sni_82596_driver_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct i596_private *lp = netdev_priv(dev);
@@ -164,12 +164,11 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
iounmap(lp->ca);
iounmap(lp->mpu_port);
free_netdev (dev);
- return 0;
}
static struct platform_driver sni_82596_driver = {
.probe = sni_82596_probe,
- .remove = sni_82596_driver_remove,
+ .remove_new = sni_82596_driver_remove,
.driver = {
.name = sni_82596_string,
},
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 0a56e9752464..251dedd55cfb 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -90,7 +90,7 @@ static struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int ehea_probe_adapter(struct platform_device *dev);
-static int ehea_remove(struct platform_device *dev);
+static void ehea_remove(struct platform_device *dev);
static const struct of_device_id ehea_module_device_table[] = {
{
@@ -121,7 +121,7 @@ static struct platform_driver ehea_driver = {
.of_match_table = ehea_device_table,
},
.probe = ehea_probe_adapter,
- .remove = ehea_remove,
+ .remove_new = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
@@ -3471,7 +3471,7 @@ out:
return ret;
}
-static int ehea_remove(struct platform_device *dev)
+static void ehea_remove(struct platform_device *dev)
{
struct ehea_adapter *adapter = platform_get_drvdata(dev);
int i;
@@ -3492,8 +3492,6 @@ static int ehea_remove(struct platform_device *dev)
list_del(&adapter->list);
ehea_update_firmware_handles();
-
- return 0;
}
static int check_module_parm(void)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 0c314bf97480..e6e47b1842ea 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -3253,7 +3253,7 @@ static int emac_probe(struct platform_device *ofdev)
return err;
}
-static int emac_remove(struct platform_device *ofdev)
+static void emac_remove(struct platform_device *ofdev)
{
struct emac_instance *dev = platform_get_drvdata(ofdev);
@@ -3290,8 +3290,6 @@ static int emac_remove(struct platform_device *ofdev)
irq_dispose_mapping(dev->emac_irq);
free_netdev(dev->ndev);
-
- return 0;
}
/* XXX Features in here should be replaced by properties... */
@@ -3319,7 +3317,7 @@ static struct platform_driver emac_driver = {
.of_match_table = emac_match,
},
.probe = emac_probe,
- .remove = emac_remove,
+ .remove_new = emac_remove,
};
static void __init emac_make_bootlist(void)
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index c3236b59e7e9..462646d1b817 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -711,7 +711,7 @@ static int mal_probe(struct platform_device *ofdev)
return err;
}
-static int mal_remove(struct platform_device *ofdev)
+static void mal_remove(struct platform_device *ofdev)
{
struct mal_instance *mal = platform_get_drvdata(ofdev);
@@ -740,8 +740,6 @@ static int mal_remove(struct platform_device *ofdev)
NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
mal->bd_dma);
kfree(mal);
-
- return 0;
}
static const struct of_device_id mal_platform_match[] =
@@ -770,7 +768,7 @@ static struct platform_driver mal_of_driver = {
.of_match_table = mal_platform_match,
},
.probe = mal_probe,
- .remove = mal_remove,
+ .remove_new = mal_remove,
};
int __init mal_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index fd437f986edf..e1712fdc3c31 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -273,7 +273,7 @@ static int rgmii_probe(struct platform_device *ofdev)
return rc;
}
-static int rgmii_remove(struct platform_device *ofdev)
+static void rgmii_remove(struct platform_device *ofdev)
{
struct rgmii_instance *dev = platform_get_drvdata(ofdev);
@@ -281,8 +281,6 @@ static int rgmii_remove(struct platform_device *ofdev)
iounmap(dev->base);
kfree(dev);
-
- return 0;
}
static const struct of_device_id rgmii_match[] =
@@ -302,7 +300,7 @@ static struct platform_driver rgmii_driver = {
.of_match_table = rgmii_match,
},
.probe = rgmii_probe,
- .remove = rgmii_remove,
+ .remove_new = rgmii_remove,
};
int __init rgmii_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index aae9a88d95d7..fa3488258ca2 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -130,7 +130,7 @@ static int tah_probe(struct platform_device *ofdev)
return rc;
}
-static int tah_remove(struct platform_device *ofdev)
+static void tah_remove(struct platform_device *ofdev)
{
struct tah_instance *dev = platform_get_drvdata(ofdev);
@@ -138,8 +138,6 @@ static int tah_remove(struct platform_device *ofdev)
iounmap(dev->base);
kfree(dev);
-
- return 0;
}
static const struct of_device_id tah_match[] =
@@ -160,7 +158,7 @@ static struct platform_driver tah_driver = {
.of_match_table = tah_match,
},
.probe = tah_probe,
- .remove = tah_remove,
+ .remove_new = tah_remove,
};
int __init tah_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 6337388ee5f4..26e86cdee2f6 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -278,7 +278,7 @@ static int zmii_probe(struct platform_device *ofdev)
return rc;
}
-static int zmii_remove(struct platform_device *ofdev)
+static void zmii_remove(struct platform_device *ofdev)
{
struct zmii_instance *dev = platform_get_drvdata(ofdev);
@@ -286,8 +286,6 @@ static int zmii_remove(struct platform_device *ofdev)
iounmap(dev->base);
kfree(dev);
-
- return 0;
}
static const struct of_device_id zmii_match[] =
@@ -308,7 +306,7 @@ static struct platform_driver zmii_driver = {
.of_match_table = zmii_match,
},
.probe = zmii_probe,
- .remove = zmii_remove,
+ .remove_new = zmii_remove,
};
int __init zmii_init(void)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 9bc0a9519899..e6684f3cc0ce 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -284,6 +284,7 @@ config ICE
select DIMLIB
select NET_DEVLINK
select PLDMFW
+ select DPLL
help
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
@@ -355,5 +356,17 @@ config IGC
To compile this driver as a module, choose M here. The module
will be called igc.
+config IDPF
+ tristate "Intel(R) Infrastructure Data Path Function Support"
+ depends on PCI_MSI
+ select DIMLIB
+ select PAGE_POOL
+ select PAGE_POOL_STATS
+ help
+ This driver supports Intel(R) Infrastructure Data Path Function
+ devices.
+
+ To compile this driver as a module, choose M here. The module
+ will be called idpf.
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index d80d04132073..dacb481ee5b1 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IAVF) += iavf/
obj-$(CONFIG_FM10K) += fm10k/
obj-$(CONFIG_ICE) += ice/
+obj-$(CONFIG_IDPF) += idpf/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index bd1321bf7e26..77e4ac103866 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -245,6 +245,7 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_errors),
I40E_NETDEV_STAT(tx_errors),
I40E_NETDEV_STAT(rx_dropped),
+ I40E_NETDEV_STAT(rx_missed_errors),
I40E_NETDEV_STAT(tx_dropped),
I40E_NETDEV_STAT(collisions),
I40E_NETDEV_STAT(rx_length_errors),
@@ -321,7 +322,7 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors),
- I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
+ I40E_PF_STAT("port.rx_discards", stats.eth.rx_discards),
I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors),
I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index de7fd43dc11c..4b9788d2ded7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -489,6 +489,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
stats->tx_dropped = vsi_stats->tx_dropped;
stats->rx_errors = vsi_stats->rx_errors;
stats->rx_dropped = vsi_stats->rx_dropped;
+ stats->rx_missed_errors = vsi_stats->rx_missed_errors;
stats->rx_crc_errors = vsi_stats->rx_crc_errors;
stats->rx_length_errors = vsi_stats->rx_length_errors;
}
@@ -680,17 +681,13 @@ i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
struct i40e_eth_stats *stat_offset,
struct i40e_eth_stats *stat)
{
- u64 rx_rdpc, rx_rxerr;
-
i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
- &stat_offset->rx_discards, &rx_rdpc);
+ &stat_offset->rx_discards, &stat->rx_discards);
i40e_stat_update64(hw,
I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
offset_loaded, &stat_offset->rx_discards_other,
- &rx_rxerr);
-
- stat->rx_discards = rx_rdpc + rx_rxerr;
+ &stat->rx_discards_other);
}
/**
@@ -712,9 +709,6 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_errors, &es->tx_errors);
- i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_discards, &es->rx_discards);
i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_unknown_protocol, &es->rx_unknown_protocol);
@@ -971,8 +965,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
ns->tx_errors = es->tx_errors;
ons->multicast = oes->rx_multicast;
ns->multicast = es->rx_multicast;
- ons->rx_dropped = oes->rx_discards;
- ns->rx_dropped = es->rx_discards;
+ ons->rx_dropped = oes->rx_discards_other;
+ ns->rx_dropped = es->rx_discards_other;
+ ons->rx_missed_errors = oes->rx_discards;
+ ns->rx_missed_errors = es->rx_discards;
ons->tx_dropped = oes->tx_discards;
ns->tx_dropped = es->tx_discards;
@@ -16320,11 +16316,15 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_switch_branch_release(pf->veb[i]);
}
- /* Now we can shutdown the PF's VSI, just before we kill
+ /* Now we can shutdown the PF's VSIs, just before we kill
* adminq and hmc.
*/
- if (pf->vsi[pf->lan_vsi])
- i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+ for (i = pf->num_alloc_vsi; i--;)
+ if (pf->vsi[i]) {
+ i40e_vsi_close(pf->vsi[i]);
+ i40e_vsi_release(pf->vsi[i]);
+ pf->vsi[i] = NULL;
+ }
i40e_cloud_filter_exit(pf);
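With the rework above, the drops the hardware counts in the RDPC register (packets missed because no receive buffer was available) surface as rx_missed_errors, while RXERR1 discards remain in rx_dropped. A hedged sketch of the resulting ndo_get_stats64 mapping (the foo_* names are hypothetical):

static void foo_get_stats64(struct net_device *ndev,
                            struct rtnl_link_stats64 *s)
{
    const struct foo_eth_stats *es = foo_current_stats(ndev);

    s->rx_dropped       = es->rx_discards_other;  /* error discards */
    s->rx_missed_errors = es->rx_discards;        /* no-buffer drops */
}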
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0b3a27f118fb..d680df615ff9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2405,7 +2405,7 @@ void i40e_update_rx_stats(struct i40e_ring *rx_ring,
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
{
if (xdp_res & I40E_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & I40E_XDP_TX) {
struct i40e_ring *xdp_ring =
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index d3d6415553ed..186b1130dbaf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -4916,7 +4916,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
vf_stats->tx_bytes = stats->tx_bytes;
vf_stats->broadcast = stats->rx_broadcast;
vf_stats->multicast = stats->rx_multicast;
- vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
vf_stats->tx_dropped = stats->tx_discards;
return 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e110ba346185..44daf335e8c5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -298,8 +298,6 @@ struct iavf_adapter {
#define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
#define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
#define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
-#define IAVF_FLAG_PROMISC_ON BIT(13)
-#define IAVF_FLAG_ALLMULTI_ON BIT(14)
#define IAVF_FLAG_LEGACY_RX BIT(15)
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
@@ -325,10 +323,7 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
-#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15)
-#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16)
-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17)
-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18)
+#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
@@ -365,6 +360,12 @@ struct iavf_adapter {
(IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+ /* Lock to prevent possible clobbering of
+ * current_netdev_promisc_flags
+ */
+ spinlock_t current_netdev_promisc_flags_lock;
+ netdev_features_t current_netdev_promisc_flags;
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -405,6 +406,8 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN)
#define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_VLAN_V2)
+#define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_CRC)
#define VLAN_V2_FILTERING_ALLOWED(_a) \
(VLAN_V2_ALLOWED((_a)) && \
((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
@@ -551,7 +554,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter);
void iavf_del_ether_addrs(struct iavf_adapter *adapter);
void iavf_add_vlans(struct iavf_adapter *adapter);
void iavf_del_vlans(struct iavf_adapter *adapter);
-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
+void iavf_set_promiscuous(struct iavf_adapter *adapter);
+bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
void iavf_request_stats(struct iavf_adapter *adapter);
int iavf_request_reset(struct iavf_adapter *adapter);
void iavf_get_hena(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 6a2e6d64bc3a..43c47c633162 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1187,6 +1187,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
}
/**
+ * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
+ * @adapter: device specific adapter
+ */
+bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
+{
+ return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
+ (IFF_PROMISC | IFF_ALLMULTI);
+}
+
+/**
* iavf_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
@@ -1199,19 +1209,10 @@ static void iavf_set_rx_mode(struct net_device *netdev)
__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- if (netdev->flags & IFF_PROMISC &&
- !(adapter->flags & IAVF_FLAG_PROMISC_ON))
- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
- else if (!(netdev->flags & IFF_PROMISC) &&
- adapter->flags & IAVF_FLAG_PROMISC_ON)
- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
-
- if (netdev->flags & IFF_ALLMULTI &&
- !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
- else if (!(netdev->flags & IFF_ALLMULTI) &&
- adapter->flags & IAVF_FLAG_ALLMULTI_ON)
- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
+ if (iavf_promiscuous_mode_changed(adapter))
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
+ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
}
/**
@@ -2162,19 +2163,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
- iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
- FLAG_VF_MULTICAST_PROMISC);
- return 0;
- }
-
- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
- iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
- return 0;
- }
- if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
- (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
- iavf_set_promiscuous(adapter, 0);
+ if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
+ iavf_set_promiscuous(adapter);
return 0;
}
@@ -4410,6 +4400,9 @@ static int iavf_set_features(struct net_device *netdev,
(features & NETIF_VLAN_OFFLOAD_FEATURES))
iavf_set_vlan_offload_features(adapter, netdev->features,
features);
+ if (CRC_OFFLOAD_ALLOWED(adapter) &&
+ ((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
+ iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
return 0;
}
@@ -4531,6 +4524,9 @@ iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
}
}
+ if (CRC_OFFLOAD_ALLOWED(adapter))
+ hw_features |= NETIF_F_RXFCS;
+
return hw_features;
}
@@ -4695,6 +4691,55 @@ iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
}
/**
+ * iavf_fix_strip_features - fix NETDEV CRC and VLAN strip features
+ * @adapter: board private structure
+ * @requested_features: stack requested NETDEV features
+ *
+ * Returns fixed-up features bits
+ **/
+static netdev_features_t
+iavf_fix_strip_features(struct iavf_adapter *adapter,
+ netdev_features_t requested_features)
+{
+ struct net_device *netdev = adapter->netdev;
+ bool crc_offload_req, is_vlan_strip;
+ netdev_features_t vlan_strip;
+ int num_non_zero_vlan;
+
+ crc_offload_req = CRC_OFFLOAD_ALLOWED(adapter) &&
+ (requested_features & NETIF_F_RXFCS);
+ num_non_zero_vlan = iavf_get_num_vlans_added(adapter);
+ vlan_strip = (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX);
+ is_vlan_strip = requested_features & vlan_strip;
+
+ if (!crc_offload_req)
+ return requested_features;
+
+ if (!num_non_zero_vlan && (netdev->features & vlan_strip) &&
+ !(netdev->features & NETIF_F_RXFCS) && is_vlan_strip) {
+ requested_features &= ~vlan_strip;
+ netdev_info(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
+ return requested_features;
+ }
+
+ if ((netdev->features & NETIF_F_RXFCS) && is_vlan_strip) {
+ requested_features &= ~vlan_strip;
+ if (!(netdev->features & vlan_strip))
+ netdev_info(netdev, "To enable VLAN stripping, first need to enable FCS/CRC stripping");
+
+ return requested_features;
+ }
+
+ if (num_non_zero_vlan && is_vlan_strip &&
+ !(netdev->features & NETIF_F_RXFCS)) {
+ requested_features &= ~NETIF_F_RXFCS;
+ netdev_info(netdev, "To disable FCS/CRC stripping, first need to disable VLAN stripping");
+ }
+
+ return requested_features;
+}
+
+/**
* iavf_fix_features - fix up the netdev feature bits
* @netdev: our net device
* @features: desired feature bits
@@ -4706,7 +4751,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- return iavf_fix_netdev_vlan_features(adapter, features);
+ features = iavf_fix_netdev_vlan_features(adapter, features);
+
+ return iavf_fix_strip_features(adapter, features);
}
static const struct net_device_ops iavf_netdev_ops = {
@@ -4970,6 +5017,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->cloud_filter_list_lock);
spin_lock_init(&adapter->fdir_fltr_lock);
spin_lock_init(&adapter->adv_rss_lock);
+ spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
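iavf_promiscuous_mode_changed() uses the XOR-and-mask idiom: XOR leaves set only the bits that differ between the cached copy and netdev->flags, and the mask restricts the test to IFF_PROMISC and IFF_ALLMULTI. A standalone restatement (the helper name is hypothetical):

#include <linux/if.h>
#include <linux/types.h>

/* true if any bit covered by @mask differs between @cached and @cur */
static bool masked_flags_changed(unsigned int cached, unsigned int cur,
                                 unsigned int mask)
{
    return (cached ^ cur) & mask;
}

/* usage: masked_flags_changed(saved, netdev->flags,
 *                             IFF_PROMISC | IFF_ALLMULTI)
 */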
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 8c5f6096b002..d64c4997136b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -7,8 +7,8 @@
#include "iavf_trace.h"
#include "iavf_prototype.h"
-static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
- u32 td_tag)
+static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
{
return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
@@ -370,8 +370,8 @@ static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
q_vector->arm_wb_state = true;
}
-static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
- struct iavf_ring_container *rc)
+static bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
+ struct iavf_ring_container *rc)
{
return &q_vector->rx == rc;
}
@@ -806,7 +806,7 @@ err:
* @rx_ring: ring to bump
* @val: new head index
**/
-static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
+static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
@@ -828,7 +828,7 @@ static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
*
* Returns the offset value for ring into the data buffer.
*/
-static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
+static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{
return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
}
@@ -977,9 +977,9 @@ no_buffers:
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
**/
-static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
- struct sk_buff *skb,
- union iavf_rx_desc *rx_desc)
+static void iavf_rx_checksum(struct iavf_vsi *vsi,
+ struct sk_buff *skb,
+ union iavf_rx_desc *rx_desc)
{
struct iavf_rx_ptype_decoded decoded;
u32 rx_error, rx_status;
@@ -1061,7 +1061,7 @@ checksum_fail:
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline int iavf_ptype_to_htype(u8 ptype)
+static int iavf_ptype_to_htype(u8 ptype)
{
struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -1085,10 +1085,10 @@ static inline int iavf_ptype_to_htype(u8 ptype)
* @skb: skb currently being received and modified
* @rx_ptype: Rx packet type
**/
-static inline void iavf_rx_hash(struct iavf_ring *ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_rx_hash(struct iavf_ring *ring,
+ union iavf_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u8 rx_ptype)
{
u32 hash;
const __le64 rss_mask =
@@ -1115,10 +1115,10 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
**/
-static inline
-void iavf_process_skb_fields(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+static void
+iavf_process_skb_fields(struct iavf_ring *rx_ring,
+ union iavf_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
@@ -1662,8 +1662,8 @@ static inline u32 iavf_buildreg_itr(const int type, u16 itr)
* @q_vector: q_vector for which itr is being updated and interrupt enabled
*
**/
-static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
- struct iavf_q_vector *q_vector)
+static void iavf_update_enable_itr(struct iavf_vsi *vsi,
+ struct iavf_q_vector *q_vector)
{
struct iavf_hw *hw = &vsi->back->hw;
u32 intval;
@@ -2275,9 +2275,9 @@ int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
-static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
- struct iavf_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
+ struct iavf_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index f9727e9c3d63..8ce6389b5815 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -142,6 +142,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_ADQ |
@@ -312,6 +313,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.databuffer_size =
ALIGN(adapter->rx_rings[i].rx_buf_len,
BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
+ if (CRC_OFFLOAD_ALLOWED(adapter))
+ vqpi->rxq.crc_disable = !!(adapter->netdev->features &
+ NETIF_F_RXFCS);
vqpi++;
}
@@ -936,14 +940,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
/**
* iavf_set_promiscuous
* @adapter: adapter structure
- * @flags: bitmask to control unicast/multicast promiscuous.
*
* Request that the PF enable promiscuous mode for our VSI.
**/
-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
+void iavf_set_promiscuous(struct iavf_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
struct virtchnl_promisc_info vpi;
- int promisc_all;
+ unsigned int flags;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -952,36 +956,57 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
return;
}
- promisc_all = FLAG_VF_UNICAST_PROMISC |
- FLAG_VF_MULTICAST_PROMISC;
- if ((flags & promisc_all) == promisc_all) {
- adapter->flags |= IAVF_FLAG_PROMISC_ON;
- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
- dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
- }
+ /* prevent changes to promiscuous flags */
+ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
- if (flags & FLAG_VF_MULTICAST_PROMISC) {
- adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
- dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
- adapter->netdev->name);
+ /* sanity check to prevent duplicate AQ calls */
+ if (!iavf_promiscuous_mode_changed(adapter)) {
+ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
+ dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
+ /* allow changes to promiscuous flags */
+ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
+ return;
}
- if (!flags) {
- if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
- adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
- dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
- }
+ /* there are 2 bits, but only 3 states */
+ if (!(netdev->flags & IFF_PROMISC) &&
+ netdev->flags & IFF_ALLMULTI) {
+ /* State 1 - only multicast promiscuous mode enabled
+ * - !IFF_PROMISC && IFF_ALLMULTI
+ */
+ flags = FLAG_VF_MULTICAST_PROMISC;
+ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
+ adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
+ dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+ } else if (!(netdev->flags & IFF_PROMISC) &&
+ !(netdev->flags & IFF_ALLMULTI)) {
+ /* State 2 - unicast/multicast promiscuous mode disabled
+ * - !IFF_PROMISC && !IFF_ALLMULTI
+ */
+ flags = 0;
+ adapter->current_netdev_promisc_flags &=
+ ~(IFF_PROMISC | IFF_ALLMULTI);
+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ } else {
+ /* State 3 - unicast/multicast promiscuous mode enabled
+ * - IFF_PROMISC && IFF_ALLMULTI
+ * - IFF_PROMISC && !IFF_ALLMULTI
+ */
+ flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
+ adapter->current_netdev_promisc_flags |= IFF_PROMISC;
+ if (netdev->flags & IFF_ALLMULTI)
+ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
+ else
+ adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
- if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
- adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
- dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
- adapter->netdev->name);
- }
+ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
}
+ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
+
+ /* allow changes to promiscuous flags */
+ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
+
adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags;
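Because IFF_PROMISC implies multicast promiscuity as well, the two netdev flag bits collapse to the three virtchnl states handled above. A compact hedged restatement of that mapping (the helper name is hypothetical; the flag macros are the ones used in the driver):

static unsigned int promisc_flags_to_vf(unsigned int netdev_flags)
{
    if (netdev_flags & IFF_PROMISC)     /* unicast + multicast promisc */
        return FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
    if (netdev_flags & IFF_ALLMULTI)    /* multicast promisc only */
        return FLAG_VF_MULTICAST_PROMISC;
    return 0;                           /* both disabled */
}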
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 960277d78e09..0679907980f7 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -43,7 +43,7 @@ ice-$(CONFIG_PCI_IOV) += \
ice_vf_mbx.o \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
-ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
+ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 5022b036ca4f..fcaa5c3b8ec0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -76,6 +76,7 @@
#include "ice_vsi_vlan_ops.h"
#include "ice_gnss.h"
#include "ice_irq.h"
+#include "ice_dpll.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -195,10 +196,13 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
+#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
+
enum ice_feature {
ICE_F_DSCP,
- ICE_F_PTP_EXTTS,
+ ICE_F_PHY_RCLK,
ICE_F_SMA_CTRL,
+ ICE_F_CGU,
ICE_F_GNSS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
@@ -508,6 +512,7 @@ enum ice_pf_flags {
ICE_FLAG_UNPLUG_AUX_DEV,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
+ ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -640,6 +645,7 @@ struct ice_pf {
#define ICE_VF_AGG_NODE_ID_START 65
#define ICE_MAX_VF_AGG_NODES 32
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
+ struct ice_dplls dplls;
};
extern struct workqueue_struct *ice_lag_wq;
@@ -668,6 +674,18 @@ static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
}
/**
+ * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
+ * @pf: Board private structure
+ *
+ * Return true if this PF should respond to the Tx timestamp interrupt
+ * indication in the miscellaneous OICR interrupt handler.
+ */
+static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
+{
+ return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE;
+}
+
+/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
* @hw: pointer to HW struct
* @vsi: pointer to VSI struct, can be NULL
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 29f7a9852aec..51281b58ad72 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1351,6 +1351,30 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
+/* Set PHY recovered clock output (direct 0x0630) */
+struct ice_aqc_set_phy_rec_clk_out {
+ u8 phy_output;
+ u8 port_num;
+#define ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT 0xFF
+ u8 flags;
+#define ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN BIT(0)
+ u8 rsvd;
+ __le32 freq;
+ u8 rsvd2[6];
+ __le16 node_handle;
+};
+
+/* Get PHY recovered clock output (direct 0x0631) */
+struct ice_aqc_get_phy_rec_clk_out {
+ u8 phy_output;
+ u8 port_num;
+#define ICE_AQC_GET_PHY_REC_CLK_OUT_CURR_PORT 0xFF
+ u8 flags;
+#define ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN BIT(0)
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
@@ -1367,6 +1391,9 @@ struct ice_aqc_link_topo_params {
#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6
#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7
#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL 9
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX 10
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPS 11
#define ICE_AQC_LINK_TOPO_NODE_CTX_S 4
#define ICE_AQC_LINK_TOPO_NODE_CTX_M \
(0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S)
@@ -1403,8 +1430,13 @@ struct ice_aqc_link_topo_addr {
struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
-#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
-#define ICE_AQC_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 0x25
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY 0x30
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
u8 rsvd[9];
};
@@ -2125,6 +2157,193 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+/* Get CGU abilities command response data structure (indirect 0x0C61) */
+struct ice_aqc_get_cgu_abilities {
+ u8 num_inputs;
+ u8 num_outputs;
+ u8 pps_dpll_idx;
+ u8 eec_dpll_idx;
+ __le32 max_in_freq;
+ __le32 max_in_phase_adj;
+ __le32 max_out_freq;
+ __le32 max_out_phase_adj;
+ u8 cgu_part_num;
+ u8 rsvd[3];
+};
+
+/* Set CGU input config (direct 0x0C62) */
+struct ice_aqc_set_cgu_input_config {
+ u8 input_idx;
+ u8 flags1;
+#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6)
+#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7)
+ u8 flags2;
+#define ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+/* Get CGU input config response descriptor structure (direct 0x0C63) */
+struct ice_aqc_get_cgu_input_config {
+ u8 input_idx;
+ u8 status;
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_LOS BIT(0)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7)
+ u8 type;
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0)
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_GPS BIT(4)
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5)
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_PHY BIT(6)
+ u8 flags1;
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7)
+ __le32 freq;
+ __le32 phase_delay;
+ u8 flags2;
+#define ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+/* Set CGU output config (direct 0x0C64) */
+struct ice_aqc_set_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define ICE_AQC_SET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2)
+#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3)
+#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4)
+ u8 src_sel;
+#define ICE_AQC_SET_CGU_OUT_CFG_DPLL_SRC_SEL ICE_M(0x1F, 0)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+/* Get CGU output config (direct 0x0C65) */
+struct ice_aqc_get_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define ICE_AQC_GET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2)
+ u8 src_sel;
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL \
+ ICE_M(0x1F, ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT)
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE \
+ ICE_M(0x7, ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT)
+ u8 rsvd;
+ __le32 freq;
+ __le32 src_freq;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+/* Get CGU DPLL status (direct 0x0C66) */
+struct ice_aqc_get_cgu_dpll_status {
+ u8 dpll_num;
+ u8 ref_state;
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6)
+ u8 dpll_state;
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO BIT(1)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7)
+ u8 config;
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0)
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT 5
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE \
+ ICE_M(0x7, ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT)
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_FREERUN 0
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_AUTOMATIC \
+ ICE_M(0x3, ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT)
+ __le32 phase_offset_h;
+ __le32 phase_offset_l;
+ u8 eec_mode;
+#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA
+#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB
+#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+/* Set CGU DPLL config (direct 0x0C67) */
+struct ice_aqc_set_cgu_dpll_config {
+ u8 dpll_num;
+ u8 ref_state;
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6)
+ u8 rsvd;
+ u8 config;
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT 5
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE \
+ ICE_M(0x7, ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_FREERUN 0
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_AUTOMATIC \
+ ICE_M(0x3, ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT)
+ u8 rsvd2[8];
+ u8 eec_mode;
+ u8 rsvd3[1];
+ __le16 node_handle;
+};
+
+/* Set CGU reference priority (direct 0x0C68) */
+struct ice_aqc_set_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority;
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
+/* Get CGU reference priority (direct 0x0C69) */
+struct ice_aqc_get_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority; /* Valid only in response */
+ u8 rsvd[13];
+};
+
+/* Get CGU info (direct 0x0C6A) */
+struct ice_aqc_get_cgu_info {
+ __le32 cgu_id;
+ __le32 cgu_cfg_ver;
+ __le32 cgu_fw_ver;
+ u8 node_part_num;
+ u8 dev_rev;
+ __le16 node_handle;
+};
+
/* Driver Shared Parameters (direct, 0x0C90) */
struct ice_aqc_driver_shared_params {
u8 set_or_get_op;
@@ -2139,16 +2358,6 @@ struct ice_aqc_driver_shared_params {
__le32 addr_low;
};
-enum ice_aqc_driver_params {
- /* OS clock index for PTP timer Domain 0 */
- ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 = 0,
- /* OS clock index for PTP timer Domain 1 */
- ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1,
-
- /* Add new parameters above */
- ICE_AQC_DRIVER_PARAM_MAX = 16,
-};
-
/* Lan Queue Overflow Event (direct, 0x1001) */
struct ice_aqc_event_lan_overflow {
__le32 prtdcb_ruptq;
@@ -2194,6 +2403,8 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
+ struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
@@ -2234,6 +2445,15 @@ struct ice_aq_desc {
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
+ struct ice_aqc_set_cgu_input_config set_cgu_input_config;
+ struct ice_aqc_get_cgu_input_config get_cgu_input_config;
+ struct ice_aqc_set_cgu_output_config set_cgu_output_config;
+ struct ice_aqc_get_cgu_output_config get_cgu_output_config;
+ struct ice_aqc_get_cgu_dpll_status get_cgu_dpll_status;
+ struct ice_aqc_set_cgu_dpll_config set_cgu_dpll_config;
+ struct ice_aqc_set_cgu_ref_prio set_cgu_ref_prio;
+ struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
+ struct ice_aqc_get_cgu_info get_cgu_info;
struct ice_aqc_driver_shared_params drv_shared_params;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
@@ -2358,6 +2578,8 @@ enum ice_adminq_opc {
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
+ ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
+ ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
@@ -2413,6 +2635,18 @@ enum ice_adminq_opc {
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
+ /* 1588/SyncE commands/events */
+ ice_aqc_opc_get_cgu_abilities = 0x0C61,
+ ice_aqc_opc_set_cgu_input_config = 0x0C62,
+ ice_aqc_opc_get_cgu_input_config = 0x0C63,
+ ice_aqc_opc_set_cgu_output_config = 0x0C64,
+ ice_aqc_opc_get_cgu_output_config = 0x0C65,
+ ice_aqc_opc_get_cgu_dpll_status = 0x0C66,
+ ice_aqc_opc_set_cgu_dpll_config = 0x0C67,
+ ice_aqc_opc_set_cgu_ref_prio = 0x0C68,
+ ice_aqc_opc_get_cgu_ref_prio = 0x0C69,
+ ice_aqc_opc_get_cgu_info = 0x0C6A,
+
ice_aqc_opc_driver_shared_params = 0x0C90,
/* Standalone Commands/Events */
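Each of the new CGU opcodes pairs a direct (16-byte) descriptor with one of the structs above. A hedged sketch of issuing one of them, modeled on how other direct ice admin queue commands are sent in ice_common.c (the wrapper name is hypothetical; ice_fill_dflt_direct_cmd_desc() and ice_aq_send_cmd() appear with these signatures elsewhere in this series):

static int ice_aq_get_cgu_ref_prio_sketch(struct ice_hw *hw, u8 dpll_num,
                                          u8 ref_idx, u8 *ref_prio)
{
    struct ice_aqc_get_cgu_ref_prio *cmd;
    struct ice_aq_desc desc;
    int status;

    ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
    cmd = &desc.params.get_cgu_ref_prio;
    cmd->dpll_num = dpll_num;
    cmd->ref_idx = ref_idx;

    status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
    if (!status)
        *ref_prio = cmd->ref_priority;  /* valid only in the response */
    return status;
}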
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 80deca45ab59..8cbe63401378 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -8,6 +8,7 @@
#include "ice_ptp_hw.h"
#define ICE_PF_RESET_WAIT_COUNT 300
+#define ICE_MAX_NETLIST_SIZE 10
static const char * const ice_link_mode_str_low[] = {
[0] = "100BASE_TX",
@@ -436,6 +437,80 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
}
/**
+ * ice_aq_get_netlist_node
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get netlist node handle.
+ */
+int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
+ return -EINTR;
+
+ if (node_handle)
+ *node_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+/**
+ * ice_find_netlist_node
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Find and return the node handle for a given node type and part number in the
+ * netlist. Returns 0 when the node is found, -ENOTBLK otherwise. If
+ * node_handle is provided, it is set to the found node handle.
+ */
+static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle)
+{
+ struct ice_aqc_get_link_topo cmd;
+ u8 rec_node_part_number;
+ u16 rec_node_handle;
+ u8 idx;
+
+ for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
+ int status;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.addr.topo_params.node_type_ctx =
+ (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
+ cmd.addr.topo_params.index = idx;
+
+ status = ice_aq_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ &rec_node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number) {
+ if (node_handle)
+ *node_handle = rec_node_handle;
+ return 0;
+ }
+ }
+
+ return -ENOTBLK;
+}
+
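+/* Usage sketch (hypothetical caller, not part of this patch), mirroring the
+ * helpers further below: probe the netlist for a C827 clock-control node and
+ * remember its handle:
+ *
+ *	u16 handle;
+ *
+ *	if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ *				   ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, &handle))
+ *		... node present, handle valid ...
+ */
+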
+/**
* ice_is_media_cage_present
* @pi: port information structure
*
@@ -2655,33 +2730,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
- * ice_aq_get_netlist_node
- * @hw: pointer to the hw struct
- * @cmd: get_link_topo AQ structure
- * @node_part_number: output node part number if node found
- * @node_handle: output node handle parameter if node found
- */
-static int
-ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
- u8 *node_part_number, u16 *node_handle)
-{
- struct ice_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- desc.params.get_link_topo = *cmd;
-
- if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
- return -EIO;
-
- if (node_handle)
- *node_handle = le16_to_cpu(desc.params.get_link_topo.addr.handle);
- if (node_part_number)
- *node_part_number = desc.params.get_link_topo.node_part_num;
-
- return 0;
-}
-
-/**
* ice_is_pf_c827 - check if pf contains c827 phy
* @hw: pointer to the hw struct
*/
@@ -2716,6 +2764,82 @@ bool ice_is_pf_c827(struct ice_hw *hw)
}
/**
+ * ice_is_phy_rclk_in_netlist
+ * @hw: pointer to the hw struct
+ *
+ * Check if the PHY Recovered Clock device is present in the netlist
+ */
+bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
+{
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
+ ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_is_clock_mux_in_netlist
+ * @hw: pointer to the hw struct
+ *
+ * Check if the Clock Multiplexer device is present in the netlist
+ */
+bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
+{
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
+ NULL))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_is_cgu_in_netlist - check for CGU presence
+ * @hw: pointer to the hw struct
+ *
+ * Check if the Clock Generation Unit (CGU) device is present in the netlist.
+ * Save the CGU part number in the hw structure for later use.
+ * Return:
+ * * true - cgu is present
+ * * false - cgu is not present
+ */
+bool ice_is_cgu_in_netlist(struct ice_hw *hw)
+{
+ if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
+ NULL)) {
+ hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
+ return true;
+ } else if (!ice_find_netlist_node(hw,
+ ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
+ NULL)) {
+ hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_is_gps_in_netlist
+ * @hw: pointer to the hw struct
+ *
+ * Check if the GPS generic device is present in the netlist
+ */
+bool ice_is_gps_in_netlist(struct ice_hw *hw)
+{
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
+ return false;
+
+ return true;
+}
+
+/**
* ice_aq_list_caps - query function/device capabilities
* @hw: pointer to the HW struct
* @buf: a buffer to hold the capabilities
@@ -4726,11 +4850,11 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- struct ice_aqc_dis_txq_item *qg_list;
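+ /* DEFINE_FLEX() declares an on-stack instance of the flex-array struct
+  * sized for a single q_id entry, replacing the previous kzalloc()/kfree()
+  * pair; __struct_size() evaluates to its compile-time size.
+  */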
+ DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ u16 i, buf_size = __struct_size(qg_list);
struct ice_q_ctx *q_ctx;
int status = -ENOENT;
struct ice_hw *hw;
- u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return -EIO;
@@ -4748,11 +4872,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
return -EIO;
}
- buf_size = struct_size(qg_list, q_id, 1);
- qg_list = kzalloc(buf_size, GFP_KERNEL);
- if (!qg_list)
- return -ENOMEM;
-
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -4785,7 +4904,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
q_ctx->q_teid = ICE_INVAL_TEID;
}
mutex_unlock(&pi->sched_lock);
- kfree(qg_list);
return status;
}
@@ -4954,10 +5072,10 @@ int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
- struct ice_aqc_dis_txq_item *qg_list;
+ DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ u16 qg_size = __struct_size(qg_list);
struct ice_hw *hw;
int status = 0;
- u16 qg_size;
int i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
@@ -4965,11 +5083,6 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
hw = pi->hw;
- qg_size = struct_size(qg_list, q_id, 1);
- qg_list = kzalloc(qg_size, GFP_KERNEL);
- if (!qg_list)
- return -ENOMEM;
-
mutex_lock(&pi->sched_lock);
for (i = 0; i < count; i++) {
@@ -4994,7 +5107,395 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
}
mutex_unlock(&pi->sched_lock);
- kfree(qg_list);
+ return status;
+}
+
+/**
+ * ice_aq_get_cgu_abilities - get cgu abilities
+ * @hw: pointer to the HW struct
+ * @abilities: CGU abilities
+ *
+ * Get CGU abilities (0x0C61)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_abilities(struct ice_hw *hw,
+ struct ice_aqc_get_cgu_abilities *abilities)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
+ return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
+}
+
+/**
+ * ice_aq_set_input_pin_cfg - set input pin config
+ * @hw: pointer to the HW struct
+ * @input_idx: Input index
+ * @flags1: Input flags
+ * @flags2: Input flags
+ * @freq: Frequency in Hz
+ * @phase_delay: Delay in ps
+ *
+ * Set CGU input config (0x0C62)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
+ u32 freq, s32 phase_delay)
+{
+ struct ice_aqc_set_cgu_input_config *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
+ cmd = &desc.params.set_cgu_input_config;
+ cmd->input_idx = input_idx;
+ cmd->flags1 = flags1;
+ cmd->flags2 = flags2;
+ cmd->freq = cpu_to_le32(freq);
+ cmd->phase_delay = cpu_to_le32(phase_delay);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_get_input_pin_cfg - get input pin config
+ * @hw: pointer to the HW struct
+ * @input_idx: Input index
+ * @status: Pin status
+ * @type: Pin type
+ * @flags1: Input flags
+ * @flags2: Input flags
+ * @freq: Frequency in Hz
+ * @phase_delay: Delay in ps
+ *
+ * Get CGU input config (0x0C63)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
+ u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
+{
+ struct ice_aqc_get_cgu_input_config *cmd;
+ struct ice_aq_desc desc;
+ int ret;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
+ cmd = &desc.params.get_cgu_input_config;
+ cmd->input_idx = input_idx;
+
+ ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!ret) {
+ if (status)
+ *status = cmd->status;
+ if (type)
+ *type = cmd->type;
+ if (flags1)
+ *flags1 = cmd->flags1;
+ if (flags2)
+ *flags2 = cmd->flags2;
+ if (freq)
+ *freq = le32_to_cpu(cmd->freq);
+ if (phase_delay)
+ *phase_delay = le32_to_cpu(cmd->phase_delay);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_aq_set_output_pin_cfg - set output pin config
+ * @hw: pointer to the HW struct
+ * @output_idx: Output index
+ * @flags: Output flags
+ * @src_sel: Index of DPLL block
+ * @freq: Output frequency
+ * @phase_delay: Output phase compensation
+ *
+ * Set CGU output config (0x0C64)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
+ u8 src_sel, u32 freq, s32 phase_delay)
+{
+ struct ice_aqc_set_cgu_output_config *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
+ cmd = &desc.params.set_cgu_output_config;
+ cmd->output_idx = output_idx;
+ cmd->flags = flags;
+ cmd->src_sel = src_sel;
+ cmd->freq = cpu_to_le32(freq);
+ cmd->phase_delay = cpu_to_le32(phase_delay);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_get_output_pin_cfg - get output pin config
+ * @hw: pointer to the HW struct
+ * @output_idx: Output index
+ * @flags: Output flags
+ * @src_sel: Internal DPLL source
+ * @freq: Output frequency
+ * @src_freq: Source frequency
+ *
+ * Get CGU output config (0x0C65)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
+ u8 *src_sel, u32 *freq, u32 *src_freq)
+{
+ struct ice_aqc_get_cgu_output_config *cmd;
+ struct ice_aq_desc desc;
+ int ret;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
+ cmd = &desc.params.get_cgu_output_config;
+ cmd->output_idx = output_idx;
+
+ ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!ret) {
+ if (flags)
+ *flags = cmd->flags;
+ if (src_sel)
+ *src_sel = cmd->src_sel;
+ if (freq)
+ *freq = le32_to_cpu(cmd->freq);
+ if (src_freq)
+ *src_freq = le32_to_cpu(cmd->src_freq);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_aq_get_cgu_dpll_status - get dpll status
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_state: Reference clock state
+ * @config: current DPLL config
+ * @dpll_state: current DPLL state
+ * @phase_offset: Phase offset in ns
+ * @eec_mode: EEC mode
+ *
+ * Get CGU DPLL status (0x0C66)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
+ u8 *dpll_state, u8 *config, s64 *phase_offset,
+ u8 *eec_mode)
+{
+ struct ice_aqc_get_cgu_dpll_status *cmd;
+ const s64 nsec_per_psec = 1000LL;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
+ cmd = &desc.params.get_cgu_dpll_status;
+ cmd->dpll_num = dpll_num;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status) {
+ *ref_state = cmd->ref_state;
+ *dpll_state = cmd->dpll_state;
+ *config = cmd->config;
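+ /* FW reports a 48-bit signed phase offset in picoseconds split across
+  * two little-endian 32-bit words; reassemble it, sign-extend from bit 47
+  * and scale from ps to ns.
+  */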
+ *phase_offset = le32_to_cpu(cmd->phase_offset_h);
+ *phase_offset <<= 32;
+ *phase_offset += le32_to_cpu(cmd->phase_offset_l);
+ *phase_offset = div64_s64(sign_extend64(*phase_offset, 47),
+ nsec_per_psec);
+ *eec_mode = cmd->eec_mode;
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_set_cgu_dpll_config - set dpll config
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_state: Reference clock state
+ * @config: DPLL config
+ * @eec_mode: EEC mode
+ *
+ * Set CGU DPLL config (0x0C67)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
+ u8 config, u8 eec_mode)
+{
+ struct ice_aqc_set_cgu_dpll_config *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
+ cmd = &desc.params.set_cgu_dpll_config;
+ cmd->dpll_num = dpll_num;
+ cmd->ref_state = ref_state;
+ cmd->config = config;
+ cmd->eec_mode = eec_mode;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_set_cgu_ref_prio - set input reference priority
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_idx: Reference pin index
+ * @ref_priority: Reference input priority
+ *
+ * Set CGU reference priority (0x0C68)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 ref_priority)
+{
+ struct ice_aqc_set_cgu_ref_prio *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
+ cmd = &desc.params.set_cgu_ref_prio;
+ cmd->dpll_num = dpll_num;
+ cmd->ref_idx = ref_idx;
+ cmd->ref_priority = ref_priority;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_get_cgu_ref_prio - get input reference priority
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_idx: Reference pin index
+ * @ref_prio: Reference input priority
+ *
+ * Get CGU reference priority (0x0C69)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 *ref_prio)
+{
+ struct ice_aqc_get_cgu_ref_prio *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
+ cmd = &desc.params.get_cgu_ref_prio;
+ cmd->dpll_num = dpll_num;
+ cmd->ref_idx = ref_idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ *ref_prio = cmd->ref_priority;
+
+ return status;
+}
+
+/**
+ * ice_aq_get_cgu_info - get cgu info
+ * @hw: pointer to the HW struct
+ * @cgu_id: CGU ID
+ * @cgu_cfg_ver: CGU config version
+ * @cgu_fw_ver: CGU firmware version
+ *
+ * Get CGU info (0x0C6A)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
+ u32 *cgu_fw_ver)
+{
+ struct ice_aqc_get_cgu_info *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
+ cmd = &desc.params.get_cgu_info;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status) {
+ *cgu_id = le32_to_cpu(cmd->cgu_id);
+ *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
+ *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_set_phy_rec_clk_out - set RCLK phy out
+ * @hw: pointer to the HW struct
+ * @phy_output: PHY reference clock output pin
+ * @enable: GPIO state to be applied
+ * @freq: PHY output frequency (in/out; on success holds the value reported by FW)
+ *
+ * Set phy recovered clock as reference (0x0630)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
+ u32 *freq)
+{
+ struct ice_aqc_set_phy_rec_clk_out *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
+ cmd = &desc.params.set_phy_rec_clk_out;
+ cmd->phy_output = phy_output;
+ cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
+ cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
+ cmd->freq = cpu_to_le32(*freq);
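+ /* @freq is in/out: the response reuses the descriptor buffer, so on
+  * success cmd->freq carries the frequency reported back by FW and is
+  * passed back to the caller below.
+  */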
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ *freq = le32_to_cpu(cmd->freq);
+
+ return status;
+}
+
+/**
+ * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
+ * @hw: pointer to the HW struct
+ * @phy_output: PHY reference clock output pin
+ * @port_num: Port number
+ * @flags: PHY flags
+ * @node_handle: PHY node handle
+ *
+ * Get PHY recovered clock output info (0x0631)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
+ u8 *flags, u16 *node_handle)
+{
+ struct ice_aqc_get_phy_rec_clk_out *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
+ cmd = &desc.params.get_phy_rec_clk_out;
+ cmd->phy_output = *phy_output;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status) {
+ *phy_output = cmd->phy_output;
+ if (port_num)
+ *port_num = cmd->port_num;
+ if (flags)
+ *flags = cmd->flags;
+ if (node_handle)
+ *node_handle = le16_to_cpu(cmd->node_handle);
+ }
+
return status;
}
@@ -5268,81 +5769,6 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
- * ice_aq_set_driver_param - Set driver parameter to share via firmware
- * @hw: pointer to the HW struct
- * @idx: parameter index to set
- * @value: the value to set the parameter to
- * @cd: pointer to command details structure or NULL
- *
- * Set the value of one of the software defined parameters. All PFs connected
- * to this device can read the value using ice_aq_get_driver_param.
- *
- * Note that firmware provides no synchronization or locking, and will not
- * save the parameter value during a device reset. It is expected that
- * a single PF will write the parameter value, while all other PFs will only
- * read it.
- */
-int
-ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 value, struct ice_sq_cd *cd)
-{
- struct ice_aqc_driver_shared_params *cmd;
- struct ice_aq_desc desc;
-
- if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
- return -EIO;
-
- cmd = &desc.params.drv_shared_params;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
-
- cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
- cmd->param_indx = idx;
- cmd->param_val = cpu_to_le32(value);
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-}
-
-/**
- * ice_aq_get_driver_param - Get driver parameter shared via firmware
- * @hw: pointer to the HW struct
- * @idx: parameter index to set
- * @value: storage to return the shared parameter
- * @cd: pointer to command details structure or NULL
- *
- * Get the value of one of the software defined parameters.
- *
- * Note that firmware provides no synchronization or locking. It is expected
- * that only a single PF will write a given parameter.
- */
-int
-ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 *value, struct ice_sq_cd *cd)
-{
- struct ice_aqc_driver_shared_params *cmd;
- struct ice_aq_desc desc;
- int status;
-
- if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
- return -EIO;
-
- cmd = &desc.params.drv_shared_params;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
-
- cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
- cmd->param_indx = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
- if (status)
- return status;
-
- *value = le32_to_cpu(cmd->param_val);
-
- return 0;
-}
-
-/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 226b81f97a92..31fdcac33986 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -93,6 +93,13 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
bool ice_is_pf_c827(struct ice_hw *hw);
+bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw);
+bool ice_is_clock_mux_in_netlist(struct ice_hw *hw);
+bool ice_is_cgu_in_netlist(struct ice_hw *hw);
+bool ice_is_gps_in_netlist(struct ice_hw *hw);
+int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
@@ -196,6 +203,44 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
+int
+ice_aq_get_cgu_abilities(struct ice_hw *hw,
+ struct ice_aqc_get_cgu_abilities *abilities);
+int
+ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
+ u32 freq, s32 phase_delay);
+int
+ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
+ u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay);
+int
+ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
+ u8 src_sel, u32 freq, s32 phase_delay);
+int
+ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
+ u8 *src_sel, u32 *freq, u32 *src_freq);
+int
+ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
+ u8 *dpll_state, u8 *config, s64 *phase_offset,
+ u8 *eec_mode);
+int
+ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
+ u8 config, u8 eec_mode);
+int
+ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 ref_priority);
+int
+ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 *ref_prio);
+int
+ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
+ u32 *cgu_fw_ver);
+
+int
+ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
+ u32 *freq);
+int
+ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
+ u8 *flags, u16 *node_handle);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
@@ -208,12 +253,6 @@ int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
int
-ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 value, struct ice_sq_cd *cd);
-int
-ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 *value, struct ice_sq_cd *cd);
-int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index b27ec93638b6..78ed909745fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1560,21 +1560,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
*/
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
- enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- struct ice_aqc_get_pkg_info_resp *pkg_info;
- u16 size;
+ DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
+ ICE_PKG_CNT);
+ u16 size = __struct_size(pkg_info);
u32 i;
- size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
- pkg_info = kzalloc(size, GFP_KERNEL);
- if (!pkg_info)
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL))
return ICE_DDP_PKG_ERR;
- if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
- state = ICE_DDP_PKG_ERR;
- goto init_pkg_free_alloc;
- }
-
for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT 4
char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
@@ -1604,10 +1597,7 @@ static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
pkg_info->pkg_info[i].name, flags);
}
-init_pkg_free_alloc:
- kfree(pkg_info);
-
- return state;
+ return ICE_DDP_PKG_SUCCESS;
}
/**
@@ -1622,9 +1612,10 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
- struct ice_aqc_get_pkg_info_resp *pkg;
+ DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
+ ICE_PKG_CNT);
+ u16 size = __struct_size(pkg);
enum ice_ddp_state state;
- u16 size;
u32 i;
/* Check package version compatibility */
@@ -1643,15 +1634,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
}
/* Check if FW is compatible with the OS package */
- size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
- pkg = kzalloc(size, GFP_KERNEL);
- if (!pkg)
- return ICE_DDP_PKG_ERR;
-
- if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
- state = ICE_DDP_PKG_LOAD_ERROR;
- goto fw_ddp_compat_free_alloc;
- }
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL))
+ return ICE_DDP_PKG_LOAD_ERROR;
for (i = 0; i < le32_to_cpu(pkg->count); i++) {
/* loop till we find the NVM package */
@@ -1668,8 +1652,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
/* done processing NVM package so break */
break;
}
-fw_ddp_compat_free_alloc:
- kfree(pkg);
+
return state;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
new file mode 100644
index 000000000000..1faee9cb944d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -0,0 +1,1904 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_trace.h"
+#include <linux/dpll.h>
+
+#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50
+#define ICE_DPLL_PIN_IDX_INVALID 0xff
+#define ICE_DPLL_RCLK_NUM_PER_PF 1
+
+/**
+ * enum ice_dpll_pin_type - enumerate ice pin types:
+ * @ICE_DPLL_PIN_INVALID: invalid pin type
+ * @ICE_DPLL_PIN_TYPE_INPUT: input pin
+ * @ICE_DPLL_PIN_TYPE_OUTPUT: output pin
+ * @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin
+ */
+enum ice_dpll_pin_type {
+ ICE_DPLL_PIN_INVALID,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ ICE_DPLL_PIN_TYPE_OUTPUT,
+ ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+};
+
+static const char * const pin_type_name[] = {
+ [ICE_DPLL_PIN_TYPE_INPUT] = "input",
+ [ICE_DPLL_PIN_TYPE_OUTPUT] = "output",
+ [ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
+};
+
+/**
+ * ice_dpll_pin_freq_set - set pin's frequency
+ * @pf: private board structure
+ * @pin: pointer to a pin
+ * @pin_type: type of pin being configured
+ * @freq: frequency to be set
+ * @extack: error reporting
+ *
+ * Set requested frequency on a pin.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error on AQ or wrong pin type given
+ */
+static int
+ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type, const u32 freq,
+ struct netlink_ext_ack *extack)
+{
+ u8 flags;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ flags = ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, pin->idx, flags,
+ pin->flags[0], freq, 0);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, pin->idx, flags,
+ 0, freq, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin freq:%u on pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ freq, pin->idx);
+ return ret;
+ }
+ pin->freq = freq;
+
+ return 0;
+}
+
+/**
+ * ice_dpll_frequency_set - wrapper for pin callback for set frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ * @pin_type: type of pin being configured
+ *
+ * Wraps internal set frequency command on a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't set in hw
+ */
+static int
+ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u32 frequency,
+ struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_frequency_set - input pin callback for set frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ *
+ * Wraps internal set frequency command on a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't set in hw
+ */
+static int
+ice_dpll_input_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_set(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_output_frequency_set - output pin callback for set frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ *
+ * Wraps internal set frequency command on a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't set in hw
+ */
+static int
+ice_dpll_output_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_set(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_frequency_get - wrapper for pin callback for get frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ * @pin_type: type of pin being configured
+ *
+ * Wraps internal get frequency command of a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't get from hw
+ */
+static int
+ice_dpll_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
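+ /* Frequency is served from the driver's cached value; the cache is
+  * refreshed on each successful set and by ice_dpll_pin_state_update().
+  */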
+ mutex_lock(&pf->dplls.lock);
+ *frequency = p->freq;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_input_frequency_get - input pin callback for get frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Wraps internal get frequency command of an input pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't get from hw
+ */
+static int
+ice_dpll_input_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_get(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_output_frequency_get - output pin callback for get frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Wraps internal get frequency command of a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't get from hw
+ */
+static int
+ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_get(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_pin_enable - enable a pin on dplls
+ * @hw: board private hw structure
+ * @pin: pointer to a pin
+ * @pin_type: type of pin being enabled
+ * @extack: error reporting
+ *
+ * Enable a pin on both dplls. Store current state in pin->flags.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type,
+ struct netlink_ext_ack *extack)
+{
+ u8 flags = 0;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to enable %s pin:%u\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_disable - disable a pin on dplls
+ * @hw: board private hw structure
+ * @pin: pointer to a pin
+ * @pin_type: type of pin being disabled
+ * @extack: error reporting
+ *
+ * Disable a pin on both dplls. Store current state in pin->flags.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type,
+ struct netlink_ext_ack *extack)
+{
+ u8 flags = 0;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to disable %s pin:%u\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_state_update - update pin's state
+ * @pf: private board struct
+ * @pin: structure with pin attributes to be updated
+ * @pin_type: type of pin being updated
+ * @extack: error reporting
+ *
+ * Determine the pin's current state and frequency, then update the struct
+ * holding the pin info. For input pins the state is tracked separately for
+ * each dpll; for rclk pins it is tracked separately for each parent.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type,
+ struct netlink_ext_ack *extack)
+{
+ u8 parent, port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
+ NULL, &pin->flags[0],
+ &pin->freq, NULL);
+ if (ret)
+ goto err;
+ if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
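+ /* An enabled input is CONNECTED on the dpll it currently feeds and
+  * SELECTABLE on the other one; a disabled input is DISCONNECTED on
+  * both dplls.
+  */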
+ if (pin->pin) {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ pin->pin == pf->dplls.eec.active_input ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_SELECTABLE;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ pin->pin == pf->dplls.pps.active_input ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_SELECTABLE;
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_SELECTABLE;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_SELECTABLE;
+ }
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
+ &pin->flags[0], NULL,
+ &pin->freq, NULL);
+ if (ret)
+ goto err;
+ if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
+ pin->state[0] = DPLL_PIN_STATE_CONNECTED;
+ else
+ pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+ break;
+ case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
+ for (parent = 0; parent < pf->dplls.rclk.num_parents;
+ parent++) {
+ u8 p = parent;
+
+ ret = ice_aq_get_phy_rec_clk_out(&pf->hw, &p,
+ &port_num,
+ &pin->flags[parent],
+ NULL);
+ if (ret)
+ goto err;
+ if (ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN &
+ pin->flags[parent])
+ pin->state[parent] = DPLL_PIN_STATE_CONNECTED;
+ else
+ pin->state[parent] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+err:
+ if (extack)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to update %s pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+ else
+ dev_err_ratelimited(ice_pf_to_dev(pf),
+ "err:%d %s failed to update %s pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+ return ret;
+}
+
+/**
+ * ice_dpll_hw_input_prio_set - set input priority value in hardware
+ * @pf: board private structure
+ * @dpll: ice dpll pointer
+ * @pin: ice pin pointer
+ * @prio: priority value being set on a dpll
+ * @extack: error reporting
+ *
+ * Internal wrapper for setting the priority in the hardware.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
+ struct ice_dpll_pin *pin, const u32 prio,
+ struct netlink_ext_ack *extack)
+{
+ int ret;
+
+ ret = ice_aq_set_cgu_ref_prio(&pf->hw, dpll->dpll_idx, pin->idx,
+ (u8)prio);
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin prio:%u on pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ prio, pin->idx);
+ else
+ dpll->input_prio[pin->idx] = prio;
+
+ return ret;
+}
+
+/**
+ * ice_dpll_lock_status_get - get dpll lock status callback
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @status: on success holds dpll's lock status
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback, provides dpll's lock status.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *status = d->dpll_state;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_mode_supported - check if dpll's working mode is supported
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @mode: mode to be checked for support
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Reports whether the given working mode is
+ * supported by the dpll.
+ *
+ * Return:
+ * * true - mode is supported
+ * * false - mode is not supported
+ */
+static bool ice_dpll_mode_supported(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_mode mode,
+ struct netlink_ext_ack *extack)
+{
+ if (mode == DPLL_MODE_AUTOMATIC)
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_dpll_mode_get - get dpll's working mode
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @mode: on success holds current working mode of dpll
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Provides working mode of dpll.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *mode = d->mode;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_pin_state_set - set pin's state on dpll
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @enable: true if the pin shall be enabled
+ * @extack: error reporting
+ * @pin_type: type of a pin
+ *
+ * Set pin state on a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - OK or no change required
+ * * negative - error
+ */
+static int
+ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ bool enable, struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ if (enable)
+ ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+ else
+ ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_output_state_set - enable/disable output pin on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: dpll being configured
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: state of pin to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Sets the given state on an output type pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - successfully set state
+ * * negative - failed to set state
+ */
+static int
+ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ bool enable = state == DPLL_PIN_STATE_CONNECTED;
+
+ return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_input_state_set - enable/disable input pin on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: dpll being configured
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: state of pin to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Sets the given state on an input type pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - successfully set state
+ * * negative - failed to set state
+ */
+static int
+ice_dpll_input_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ bool enable = state == DPLL_PIN_STATE_SELECTABLE;
+
+ return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_pin_state_get - get pin's state on dpll
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ * @pin_type: type of the queried pin
+ *
+ * Determine the pin's state and report it back to the caller.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int
+ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
+ if (ret)
+ goto unlock;
+ if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+ *state = p->state[d->dpll_idx];
+ else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
+ *state = p->state[0];
+ ret = 0;
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_output_state_get - get output pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int
+ice_dpll_output_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ return ice_dpll_pin_state_get(pin, pin_priv, dpll, dpll_priv, state,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_input_state_get - get input pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of an input pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int
+ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ return ice_dpll_pin_state_get(pin, pin_priv, dpll, dpll_priv, state,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_input_prio_get - get dpll's input prio
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @prio: on success - returns input priority on dpll
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting priority of an input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_input_prio_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *prio = d->input_prio[p->idx];
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_input_prio_set - set dpll input prio
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @prio: input priority to be set on dpll
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting priority of an input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ if (prio > ICE_DPLL_PRIO_MAX) {
+ NL_SET_ERR_MSG_FMT(extack, "prio out of supported range 0-%d",
+ ICE_DPLL_PRIO_MAX);
+ return -EINVAL;
+ }
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_direction - callback for get input pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: holds input pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of a input pin.
+ *
+ * Return:
+ * * 0 - success
+ */
+static int
+ice_dpll_input_direction(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ *direction = DPLL_PIN_DIRECTION_INPUT;
+
+ return 0;
+}
+
+/**
+ * ice_dpll_output_direction - callback for get output pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: holds output pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of an output pin.
+ *
+ * Return:
+ * * 0 - success
+ */
+static int
+ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ *direction = DPLL_PIN_DIRECTION_OUTPUT;
+
+ return 0;
+}
+
+/**
+ * ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @parent_pin: pin parent pointer
+ * @parent_pin_priv: parent private data pointer passed on pin registration
+ * @state: state to be set on pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback, sets the state of an rclk pin on a parent pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv;
+ bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_pf *pf = p->pf;
+ int ret = -EINVAL;
+ u32 hw_idx;
+
+ mutex_lock(&pf->dplls.lock);
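+ /* Parent pins are the board input pins; translate the parent index to
+  * this PF's recovered-clock output index before bound-checking it.
+  */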
+ hw_idx = parent->idx - pf->dplls.base_rclk_idx;
+ if (hw_idx >= pf->dplls.num_inputs)
+ goto unlock;
+
+ if ((enable && p->state[hw_idx] == DPLL_PIN_STATE_CONNECTED) ||
+ (!enable && p->state[hw_idx] == DPLL_PIN_STATE_DISCONNECTED)) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "pin:%u state:%u on parent:%u already set",
+ p->idx, state, parent->idx);
+ goto unlock;
+ }
+ ret = ice_aq_set_phy_rec_clk_out(&pf->hw, hw_idx, enable,
+ &p->freq);
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ state, p->idx, parent->idx);
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_rclk_state_on_pin_get - get a state of rclk pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @parent_pin: pin parent pointer
+ * @parent_pin_priv: pin parent priv data pointer passed on pin registration
+ * @state: on success holds pin state on parent pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback, gets the state of a recovered clock pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv;
+ struct ice_pf *pf = p->pf;
+ int ret = -EINVAL;
+ u32 hw_idx;
+
+ mutex_lock(&pf->dplls.lock);
+ hw_idx = parent->idx - pf->dplls.base_rclk_idx;
+ if (hw_idx >= pf->dplls.num_inputs)
+ goto unlock;
+
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+ extack);
+ if (ret)
+ goto unlock;
+
+ *state = p->state[hw_idx];
+ ret = 0;
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+static const struct dpll_pin_ops ice_dpll_rclk_ops = {
+ .state_on_pin_set = ice_dpll_rclk_state_on_pin_set,
+ .state_on_pin_get = ice_dpll_rclk_state_on_pin_get,
+ .direction_get = ice_dpll_input_direction,
+};
+
+static const struct dpll_pin_ops ice_dpll_input_ops = {
+ .frequency_get = ice_dpll_input_frequency_get,
+ .frequency_set = ice_dpll_input_frequency_set,
+ .state_on_dpll_get = ice_dpll_input_state_get,
+ .state_on_dpll_set = ice_dpll_input_state_set,
+ .prio_get = ice_dpll_input_prio_get,
+ .prio_set = ice_dpll_input_prio_set,
+ .direction_get = ice_dpll_input_direction,
+};
+
+static const struct dpll_pin_ops ice_dpll_output_ops = {
+ .frequency_get = ice_dpll_output_frequency_get,
+ .frequency_set = ice_dpll_output_frequency_set,
+ .state_on_dpll_get = ice_dpll_output_state_get,
+ .state_on_dpll_set = ice_dpll_output_state_set,
+ .direction_get = ice_dpll_output_direction,
+};
+
+static const struct dpll_device_ops ice_dpll_ops = {
+ .lock_status_get = ice_dpll_lock_status_get,
+ .mode_supported = ice_dpll_mode_supported,
+ .mode_get = ice_dpll_mode_get,
+};
+
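+/* These ops tables describe the contract with the dpll subsystem: they are
+ * passed to dpll_pin_register()/dpll_device_register() in the driver's init
+ * path (not shown in this hunk), and the subsystem invokes the callbacks
+ * with the priv pointers supplied at registration time.
+ */
+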
+/**
+ * ice_generate_clock_id - generates unique clock_id for registering dpll.
+ * @pf: board private structure
+ *
+ * Generates a unique (per board) clock_id for allocation and search of dpll
+ * devices in the Linux dpll subsystem.
+ *
+ * Return: generated clock id for the board
+ */
+static u64 ice_generate_clock_id(struct ice_pf *pf)
+{
+ return pci_get_dsn(pf->pdev);
+}
+
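+/* Because the DSN is board-wide, every PF on the same NIC computes the same
+ * clock_id and thus resolves to the same dpll devices and pins within the
+ * dpll subsystem.
+ */
+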
+/**
+ * ice_dpll_notify_changes - notify dpll subsystem about changes
+ * @d: pointer to dpll
+ *
+ * Once a change is detected, an appropriate event is submitted to the dpll
+ * subsystem.
+ */
+static void ice_dpll_notify_changes(struct ice_dpll *d)
+{
+ if (d->prev_dpll_state != d->dpll_state) {
+ d->prev_dpll_state = d->dpll_state;
+ dpll_device_change_ntf(d->dpll);
+ }
+ if (d->prev_input != d->active_input) {
+ if (d->prev_input)
+ dpll_pin_change_ntf(d->prev_input);
+ d->prev_input = d->active_input;
+ if (d->active_input)
+ dpll_pin_change_ntf(d->active_input);
+ }
+}
+
+/**
+ * ice_dpll_update_state - update dpll state
+ * @pf: pf private structure
+ * @d: pointer to queried dpll device
+ * @init: true when called during initialization of ice dpll
+ *
+ * Poll current state of dpll from hw and update ice_dpll struct.
+ *
+ * Context: Called by kworker under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - AQ failure
+ */
+static int
+ice_dpll_update_state(struct ice_pf *pf, struct ice_dpll *d, bool init)
+{
+ struct ice_dpll_pin *p = NULL;
+ int ret;
+
+ ret = ice_get_cgu_state(&pf->hw, d->dpll_idx, d->prev_dpll_state,
+ &d->input_idx, &d->ref_state, &d->eec_mode,
+ &d->phase_shift, &d->dpll_state);
+
+ dev_dbg(ice_pf_to_dev(pf),
+ "update dpll=%d, prev_src_idx:%u, src_idx:%u, state:%d, prev:%d mode:%d\n",
+ d->dpll_idx, d->prev_input_idx, d->input_idx,
+ d->dpll_state, d->prev_dpll_state, d->mode);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "update dpll=%d state failed, ret=%d %s\n",
+ d->dpll_idx, ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+ return ret;
+ }
+ if (init) {
+ if (d->dpll_state == DPLL_LOCK_STATUS_LOCKED ||
+ d->dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ)
+ d->active_input = pf->dplls.inputs[d->input_idx].pin;
+ p = &pf->dplls.inputs[d->input_idx];
+ return ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT, NULL);
+ }
+ if (d->dpll_state == DPLL_LOCK_STATUS_HOLDOVER ||
+ d->dpll_state == DPLL_LOCK_STATUS_UNLOCKED) {
+ d->active_input = NULL;
+ if (d->input_idx != ICE_DPLL_PIN_IDX_INVALID)
+ p = &pf->dplls.inputs[d->input_idx];
+ d->prev_input_idx = ICE_DPLL_PIN_IDX_INVALID;
+ d->input_idx = ICE_DPLL_PIN_IDX_INVALID;
+ if (!p)
+ return 0;
+ ret = ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT, NULL);
+ } else if (d->input_idx != d->prev_input_idx) {
+ if (d->prev_input_idx != ICE_DPLL_PIN_IDX_INVALID) {
+ p = &pf->dplls.inputs[d->prev_input_idx];
+ ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ NULL);
+ }
+ if (d->input_idx != ICE_DPLL_PIN_IDX_INVALID) {
+ p = &pf->dplls.inputs[d->input_idx];
+ d->active_input = p->pin;
+ ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ NULL);
+ }
+ d->prev_input_idx = d->input_idx;
+ }
+
+ return ret;
+}
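
Stripped of the HW and AQ plumbing, the tracking logic above has three
branches: the init path seeds the active input, holdover/unlocked forgets
it, and a changed index swaps it. A toy sketch of the non-init transitions
(types and values are illustrative only, not the driver's):

#include <stdio.h>

#define IDX_INVALID -1

enum lock { UNLOCKED, HOLDOVER, LOCKED };

struct toy_dpll { enum lock state; int input_idx, prev_input_idx, active; };

static void track_input(struct toy_dpll *d)
{
	if (d->state == HOLDOVER || d->state == UNLOCKED) {
		d->active = IDX_INVALID;	/* no valid source anymore */
		d->input_idx = IDX_INVALID;
		d->prev_input_idx = IDX_INVALID;
	} else if (d->input_idx != d->prev_input_idx) {
		d->active = d->input_idx;	/* source switched */
		d->prev_input_idx = d->input_idx;
	}
}

int main(void)
{
	struct toy_dpll d = { LOCKED, 2, 0, 0 };

	track_input(&d);
	printf("active input: %d\n", d.active);	/* 2 */
	d.state = HOLDOVER;
	track_input(&d);
	printf("active input: %d\n", d.active);	/* -1 */
	return 0;
}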
+
+/**
+ * ice_dpll_periodic_work - DPLLs periodic worker
+ * @work: pointer to kthread_work structure
+ *
+ * The periodic worker is responsible for polling the state of the dplls.
+ * Context: Holds pf->dplls.lock
+ */
+static void ice_dpll_periodic_work(struct kthread_work *work)
+{
+ struct ice_dplls *d = container_of(work, struct ice_dplls, work.work);
+ struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
+ struct ice_dpll *de = &pf->dplls.eec;
+ struct ice_dpll *dp = &pf->dplls.pps;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_update_state(pf, de, false);
+ if (!ret)
+ ret = ice_dpll_update_state(pf, dp, false);
+ if (ret) {
+ d->cgu_state_acq_err_num++;
+ /* stop rescheduling this worker */
+ if (d->cgu_state_acq_err_num >
+ ICE_CGU_STATE_ACQ_ERR_THRESHOLD) {
+ dev_err(ice_pf_to_dev(pf),
+ "EEC/PPS DPLLs periodic work disabled\n");
+ mutex_unlock(&pf->dplls.lock);
+ return;
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+ ice_dpll_notify_changes(de);
+ ice_dpll_notify_changes(dp);
+
+ /* Run twice a second or reschedule if update failed */
+ kthread_queue_delayed_work(d->kworker, &d->work,
+ ret ? msecs_to_jiffies(10) :
+ msecs_to_jiffies(500));
+}
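
The rescheduling policy above: a 500 ms cadence on success, a 10 ms retry
on failure, and no reschedule at all once the error counter passes
ICE_CGU_STATE_ACQ_ERR_THRESHOLD. A plain-C sketch of just that policy
(the threshold value below is invented for the demo):

#include <stdbool.h>
#include <stdio.h>

#define ERR_THRESHOLD 5	/* stand-in for ICE_CGU_STATE_ACQ_ERR_THRESHOLD */

static int err_num;

/* returns the next delay in ms, or -1 to stop rescheduling */
static int next_delay_ms(bool failed)
{
	if (failed) {
		if (++err_num > ERR_THRESHOLD)
			return -1;	/* give up on this worker */
		return 10;		/* quick retry */
	}
	return 500;			/* normal twice-a-second poll */
}

int main(void)
{
	printf("%d\n", next_delay_ms(false));		/* 500 */
	for (int i = 0; i < 6; i++)
		printf("%d\n", next_delay_ms(true));	/* 10 x5, then -1 */
	return 0;
}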
+
+/**
+ * ice_dpll_release_pins - release pins resources from dpll subsystem
+ * @pins: pointer to pins array
+ * @count: number of pins
+ *
+ * Release resources of given pins array in the dpll subsystem.
+ */
+static void ice_dpll_release_pins(struct ice_dpll_pin *pins, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ dpll_pin_put(pins[i].pin);
+}
+
+/**
+ * ice_dpll_get_pins - get pins from dpll subsystem
+ * @pf: board private structure
+ * @pins: pointer to pins array
+ * @start_idx: pin index at which allocation starts in the dpll subsystem
+ * @count: number of pins
+ * @clock_id: clock_id of dpll device
+ *
+ * Get (allocate) pins in the dpll subsystem and store them in the pin field
+ * of the given pins array.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - allocation failure reason
+ */
+static int
+ice_dpll_get_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
+ int start_idx, int count, u64 clock_id)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ pins[i].pin = dpll_pin_get(clock_id, i + start_idx, THIS_MODULE,
+ &pins[i].prop);
+ if (IS_ERR(pins[i].pin)) {
+ ret = PTR_ERR(pins[i].pin);
+ goto release_pins;
+ }
+ }
+
+ return 0;
+
+release_pins:
+ while (--i >= 0)
+ dpll_pin_put(pins[i].pin);
+ return ret;
+}
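
The error path above is the common acquire-with-rollback idiom: on the
first failure, "while (--i >= 0)" releases exactly the pins obtained so
far, in reverse order. A self-contained sketch, with toy acquire/release
functions standing in for dpll_pin_get()/dpll_pin_put():

#include <stdio.h>

static int acquire(int i)
{
	if (i == 3)
		return -1;	/* simulate failure on the 4th item */
	printf("acquire %d\n", i);
	return 0;
}

static void release(int i)
{
	printf("release %d\n", i);
}

static int acquire_all(int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (acquire(i))
			goto unwind;
	return 0;

unwind:
	while (--i >= 0)	/* exactly i items succeeded before this */
		release(i);	/* 2, 1, 0 - reverse order */
	return -1;
}

int main(void)
{
	(void)acquire_all(5);
	return 0;
}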
+
+/**
+ * ice_dpll_unregister_pins - unregister pins from a dpll
+ * @dpll: dpll device pointer
+ * @pins: pointer to pins array
+ * @ops: callback ops registered with the pins
+ * @count: number of pins
+ *
+ * Unregister each pin of the given array from the given dpll device
+ * registered in the dpll subsystem.
+ */
+static void
+ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
+ const struct dpll_pin_ops *ops, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+}
+
+/**
+ * ice_dpll_register_pins - register pins with a dpll
+ * @dpll: dpll pointer to register pins with
+ * @pins: pointer to pins array
+ * @ops: callback ops registered with the pins
+ * @count: number of pins
+ *
+ * Register each pin of the given array with the given dpll in the dpll
+ * subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
+ const struct dpll_pin_ops *ops, int count)
+{
+ int ret, i;
+
+ for (i = 0; i < count; i++) {
+ ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
+ if (ret)
+ goto unregister_pins;
+ }
+
+ return 0;
+
+unregister_pins:
+ while (--i >= 0)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_direct_pins - deinitialize direct pins
+ * @cgu: true if the CGU is present and controlled by this NIC
+ * @pins: pointer to pins array
+ * @count: number of pins
+ * @ops: callback ops registered with the pins
+ * @first: dpll device pointer
+ * @second: dpll device pointer
+ *
+ * If the CGU is owned, unregister the pins from the given dplls.
+ * Release the pins' resources back to the dpll subsystem.
+ */
+static void
+ice_dpll_deinit_direct_pins(bool cgu, struct ice_dpll_pin *pins, int count,
+ const struct dpll_pin_ops *ops,
+ struct dpll_device *first,
+ struct dpll_device *second)
+{
+ if (cgu) {
+ ice_dpll_unregister_pins(first, pins, ops, count);
+ ice_dpll_unregister_pins(second, pins, ops, count);
+ }
+ ice_dpll_release_pins(pins, count);
+}
+
+/**
+ * ice_dpll_init_direct_pins - initialize direct pins
+ * @pf: board private structure
+ * @cgu: true if the CGU is present and controlled by this NIC
+ * @pins: pointer to pins array
+ * @start_idx: index at which allocation starts in the dpll subsystem
+ * @count: number of pins
+ * @ops: callback ops registered with the pins
+ * @first: dpll device pointer
+ * @second: dpll device pointer
+ *
+ * Allocate the directly connected pins of a given array in the dpll
+ * subsystem. If the CGU is owned, register the allocated pins with the
+ * given dplls.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_init_direct_pins(struct ice_pf *pf, bool cgu,
+ struct ice_dpll_pin *pins, int start_idx, int count,
+ const struct dpll_pin_ops *ops,
+ struct dpll_device *first, struct dpll_device *second)
+{
+ int ret;
+
+ ret = ice_dpll_get_pins(pf, pins, start_idx, count, pf->dplls.clock_id);
+ if (ret)
+ return ret;
+ if (cgu) {
+ ret = ice_dpll_register_pins(first, pins, ops, count);
+ if (ret)
+ goto release_pins;
+ ret = ice_dpll_register_pins(second, pins, ops, count);
+ if (ret)
+ goto unregister_first;
+ }
+
+ return 0;
+
+unregister_first:
+ ice_dpll_unregister_pins(first, pins, ops, count);
+release_pins:
+ ice_dpll_release_pins(pins, count);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_rclk_pin - release rclk pin resources
+ * @pf: board private structure
+ *
+ * Unregister the rclk pin from its parent pins and release its resources in
+ * the dpll subsystem.
+ */
+static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *rclk = &pf->dplls.rclk;
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct dpll_pin *parent;
+ int i;
+
+ for (i = 0; i < rclk->num_parents; i++) {
+ parent = pf->dplls.inputs[rclk->parent_idx[i]].pin;
+ if (!parent)
+ continue;
+ dpll_pin_on_pin_unregister(parent, rclk->pin,
+ &ice_dpll_rclk_ops, rclk);
+ }
+ if (WARN_ON_ONCE(!vsi || !vsi->netdev))
+ return;
+ netdev_dpll_pin_clear(vsi->netdev);
+ dpll_pin_put(rclk->pin);
+}
+
+/**
+ * ice_dpll_init_rclk_pins - initialize recovered clock pin
+ * @pf: board private structure
+ * @pin: pin to register
+ * @start_idx: index at which allocation starts in the dpll subsystem
+ * @ops: callback ops registered with the pins
+ *
+ * Allocate a resource for the recovered clock pin in the dpll subsystem.
+ * Register the pin with each parent listed in its info, and with the pf's
+ * main vsi netdev.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ int start_idx, const struct dpll_pin_ops *ops)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct dpll_pin *parent;
+ int ret, i;
+
+ ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF,
+ pf->dplls.clock_id);
+ if (ret)
+ return ret;
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++) {
+ parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[i]].pin;
+ if (!parent) {
+ ret = -ENODEV;
+ goto unregister_pins;
+ }
+ ret = dpll_pin_on_pin_register(parent, pf->dplls.rclk.pin,
+ ops, &pf->dplls.rclk);
+ if (ret)
+ goto unregister_pins;
+ }
+ if (WARN_ON(!vsi || !vsi->netdev))
+ return -EINVAL;
+ netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+
+ return 0;
+
+unregister_pins:
+ while (i) {
+ parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[--i]].pin;
+ dpll_pin_on_pin_unregister(parent, pf->dplls.rclk.pin,
+ &ice_dpll_rclk_ops, &pf->dplls.rclk);
+ }
+ ice_dpll_release_pins(pin, ICE_DPLL_RCLK_NUM_PER_PF);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_pins - deinitialize direct pins
+ * @pf: board private structure
+ * @cgu: true if the CGU is controlled by this PF
+ *
+ * If the CGU is owned, unregister the directly connected pins from the
+ * dplls. Release the resources of the directly connected pins back to the
+ * dpll subsystem.
+ */
+static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
+{
+ struct ice_dpll_pin *outputs = pf->dplls.outputs;
+ struct ice_dpll_pin *inputs = pf->dplls.inputs;
+ int num_outputs = pf->dplls.num_outputs;
+ int num_inputs = pf->dplls.num_inputs;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll *de = &d->eec;
+ struct ice_dpll *dp = &d->pps;
+
+ ice_dpll_deinit_rclk_pin(pf);
+ if (cgu) {
+ ice_dpll_unregister_pins(dp->dpll, inputs, &ice_dpll_input_ops,
+ num_inputs);
+ ice_dpll_unregister_pins(de->dpll, inputs, &ice_dpll_input_ops,
+ num_inputs);
+ }
+ ice_dpll_release_pins(inputs, num_inputs);
+ if (cgu) {
+ ice_dpll_unregister_pins(dp->dpll, outputs,
+ &ice_dpll_output_ops, num_outputs);
+ ice_dpll_unregister_pins(de->dpll, outputs,
+ &ice_dpll_output_ops, num_outputs);
+ ice_dpll_release_pins(outputs, num_outputs);
+ }
+}
+
+/**
+ * ice_dpll_init_pins - init pins and register them with the dplls
+ * @pf: board private structure
+ * @cgu: true if the CGU is present and controlled by this NIC
+ *
+ * Initialize the pf's directly connected pins and register them with the
+ * pf's dplls in the Linux dpll subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - initialization failure reason
+ */
+static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
+{
+ u32 rclk_idx;
+ int ret;
+
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0,
+ pf->dplls.num_inputs,
+ &ice_dpll_input_ops,
+ pf->dplls.eec.dpll, pf->dplls.pps.dpll);
+ if (ret)
+ return ret;
+ if (cgu) {
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs,
+ pf->dplls.num_inputs,
+ pf->dplls.num_outputs,
+ &ice_dpll_output_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_inputs;
+ }
+ rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id;
+ ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx,
+ &ice_dpll_rclk_ops);
+ if (ret)
+ goto deinit_outputs;
+
+ return 0;
+deinit_outputs:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs,
+ pf->dplls.num_outputs,
+ &ice_dpll_output_ops, pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+deinit_inputs:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.inputs, pf->dplls.num_inputs,
+ &ice_dpll_input_ops, pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ return ret;
+}
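
Note the index arithmetic above: inputs claim dpll-subsystem indexes
[0, num_inputs), outputs follow them, and each PF's recovered-clock pin
lands after both ranges at an offset of its pf_id. A tiny sketch of that
layout (the counts are made up):

#include <stdio.h>

int main(void)
{
	unsigned int num_inputs = 4, num_outputs = 6, pf_id = 1;
	unsigned int first_output = num_inputs;
	unsigned int rclk_idx = num_inputs + num_outputs + pf_id;

	printf("inputs:  [0, %u)\n", num_inputs);
	printf("outputs: [%u, %u)\n", first_output,
	       first_output + num_outputs);
	printf("rclk pin idx for PF%u: %u\n", pf_id, rclk_idx);
	return 0;
}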
+
+/**
+ * ice_dpll_deinit_dpll - deinitialize dpll device
+ * @pf: board private structure
+ * @d: pointer to ice_dpll
+ * @cgu: true if the CGU is present and controlled by this NIC
+ *
+ * If the CGU is owned, unregister the dpll from the dpll subsystem.
+ * Release the dpll device's resources back to the dpll subsystem.
+ */
+static void
+ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
+{
+ if (cgu)
+ dpll_device_unregister(d->dpll, &ice_dpll_ops, d);
+ dpll_device_put(d->dpll);
+}
+
+/**
+ * ice_dpll_init_dpll - initialize dpll device in dpll subsystem
+ * @pf: board private structure
+ * @d: dpll to be initialized
+ * @cgu: true if the CGU is present and controlled by this NIC
+ * @type: type of dpll being initialized
+ *
+ * Allocate a dpll instance for this board in the dpll subsystem. If the CGU
+ * is controlled by this NIC, also register the dpll with the callback ops.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - initialization failure reason
+ */
+static int
+ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
+ enum dpll_type type)
+{
+ u64 clock_id = pf->dplls.clock_id;
+ int ret;
+
+ d->dpll = dpll_device_get(clock_id, d->dpll_idx, THIS_MODULE);
+ if (IS_ERR(d->dpll)) {
+ ret = PTR_ERR(d->dpll);
+ dev_err(ice_pf_to_dev(pf),
+ "dpll_device_get failed (%p) err=%d\n", d, ret);
+ return ret;
+ }
+ d->pf = pf;
+ if (cgu) {
+ ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
+ if (ret) {
+ dpll_device_put(d->dpll);
+ return ret;
+ }
+ }
+
+ return 0;
+}
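
The get-then-register split above means every PF holds a refcounted handle
to the shared dpll object, while only the CGU owner registers the ops that
expose the device for control. A toy sketch of that ownership split
(hypothetical API, not the dpll subsystem's):

#include <stdbool.h>
#include <stdio.h>

struct toy_dev { int refcount; bool registered; };

static struct toy_dev shared;

static struct toy_dev *toy_get(void)
{
	shared.refcount++;	/* find-or-create, refcounted */
	return &shared;
}

static int toy_register(struct toy_dev *d)
{
	d->registered = true;	/* expose ops to userspace */
	return 0;
}

static int init_dpll(bool cgu_owner)
{
	struct toy_dev *d = toy_get();

	if (cgu_owner)
		return toy_register(d);
	return 0;		/* non-owners only hold a reference */
}

int main(void)
{
	init_dpll(true);	/* CGU-owning PF */
	init_dpll(false);	/* another PF on the same board */
	printf("refcount=%d registered=%d\n", shared.refcount,
	       shared.registered);
	return 0;
}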
+
+/**
+ * ice_dpll_deinit_worker - deinitialize dpll kworker
+ * @pf: board private structure
+ *
+ * Stop the dpll's kworker and release its resources.
+ */
+static void ice_dpll_deinit_worker(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+
+ kthread_cancel_delayed_work_sync(&d->work);
+ kthread_destroy_worker(d->kworker);
+}
+
+/**
+ * ice_dpll_init_worker - Initialize DPLLs periodic worker
+ * @pf: board private structure
+ *
+ * Create and start DPLLs periodic worker.
+ *
+ * Context: Shall be called after pf->dplls.lock is initialized.
+ * Return:
+ * * 0 - success
+ * * negative - create worker failure
+ */
+static int ice_dpll_init_worker(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ struct kthread_worker *kworker;
+
+ ice_dpll_update_state(pf, &d->eec, true);
+ ice_dpll_update_state(pf, &d->pps, true);
+ kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
+ kworker = kthread_create_worker(0, "ice-dplls-%s",
+ dev_name(ice_pf_to_dev(pf)));
+ if (IS_ERR(kworker))
+ return PTR_ERR(kworker);
+ d->kworker = kworker;
+ d->cgu_state_acq_err_num = 0;
+ kthread_queue_delayed_work(d->kworker, &d->work, 0);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_init_info_direct_pins - initializes direct pins info
+ * @pf: board private structure
+ * @pin_type: type of pins being initialized
+ *
+ * Initialize information for the directly connected pins and cache it in
+ * the pf's pin structures.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int
+ice_dpll_init_info_direct_pins(struct ice_pf *pf,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll *de = &pf->dplls.eec, *dp = &pf->dplls.pps;
+ int num_pins, i, ret = -EINVAL;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_dpll_pin *pins;
+ u8 freq_supp_num;
+ bool input;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ pins = pf->dplls.inputs;
+ num_pins = pf->dplls.num_inputs;
+ input = true;
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ pins = pf->dplls.outputs;
+ num_pins = pf->dplls.num_outputs;
+ input = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_pins; i++) {
+ pins[i].idx = i;
+ pins[i].prop.board_label = ice_cgu_get_pin_name(hw, i, input);
+ pins[i].prop.type = ice_cgu_get_pin_type(hw, i, input);
+ if (input) {
+ ret = ice_aq_get_cgu_ref_prio(hw, de->dpll_idx, i,
+ &de->input_prio[i]);
+ if (ret)
+ return ret;
+ ret = ice_aq_get_cgu_ref_prio(hw, dp->dpll_idx, i,
+ &dp->input_prio[i]);
+ if (ret)
+ return ret;
+ pins[i].prop.capabilities |=
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE;
+ }
+ pins[i].prop.capabilities |=
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
+ if (ret)
+ return ret;
+ pins[i].prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(hw, i, input, &freq_supp_num);
+ pins[i].prop.freq_supported_num = freq_supp_num;
+ pins[i].pf = pf;
+ }
+
+ return ret;
+}
+
+/**
+ * ice_dpll_init_info_rclk_pin - initializes rclk pin information
+ * @pf: board private structure
+ *
+ * Initialize information for the rclk pin and cache it in pf->dplls.rclk.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *pin = &pf->dplls.rclk;
+
+ pin->prop.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT;
+ pin->prop.capabilities |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ pin->pf = pf;
+
+ return ice_dpll_pin_state_update(pf, pin,
+ ICE_DPLL_PIN_TYPE_RCLK_INPUT, NULL);
+}
+
+/**
+ * ice_dpll_init_pins_info - init pins info wrapper
+ * @pf: board private structure
+ * @pin_type: type of pins being initialized
+ *
+ * Wrap the pin initialization functions, dispatching on the pin type.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int
+ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type)
+{
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ return ice_dpll_init_info_direct_pins(pf, pin_type);
+ case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
+ return ice_dpll_init_info_rclk_pin(pf);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * ice_dpll_deinit_info - release memory allocated for pins info
+ * @pf: board private structure
+ *
+ * Release the memory allocated for pins by the ice_dpll_init_info function.
+ */
+static void ice_dpll_deinit_info(struct ice_pf *pf)
+{
+ kfree(pf->dplls.inputs);
+ kfree(pf->dplls.outputs);
+ kfree(pf->dplls.eec.input_prio);
+ kfree(pf->dplls.pps.input_prio);
+}
+
+/**
+ * ice_dpll_init_info - prepare pf's dpll information structure
+ * @pf: board private structure
+ * @cgu: true if the CGU is present and controlled by this NIC
+ *
+ * Acquire (from HW) and set basic dpll information (on pf->dplls struct).
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
+{
+ struct ice_aqc_get_cgu_abilities abilities;
+ struct ice_dpll *de = &pf->dplls.eec;
+ struct ice_dpll *dp = &pf->dplls.pps;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_hw *hw = &pf->hw;
+ int ret, alloc_size, i;
+
+ d->clock_id = ice_generate_clock_id(pf);
+ ret = ice_aq_get_cgu_abilities(hw, &abilities);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "err:%d %s failed to read cgu abilities\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
+ return ret;
+ }
+
+ de->dpll_idx = abilities.eec_dpll_idx;
+ dp->dpll_idx = abilities.pps_dpll_idx;
+ d->num_inputs = abilities.num_inputs;
+ d->num_outputs = abilities.num_outputs;
+ d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
+ d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
+
+ alloc_size = sizeof(*d->inputs) * d->num_inputs;
+ d->inputs = kzalloc(alloc_size, GFP_KERNEL);
+ if (!d->inputs)
+ return -ENOMEM;
+
+ alloc_size = sizeof(*de->input_prio) * d->num_inputs;
+ de->input_prio = kzalloc(alloc_size, GFP_KERNEL);
+ if (!de->input_prio)
+ return -ENOMEM;
+
+ dp->input_prio = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dp->input_prio)
+ return -ENOMEM;
+
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_INPUT);
+ if (ret)
+ goto deinit_info;
+
+ if (cgu) {
+ alloc_size = sizeof(*d->outputs) * d->num_outputs;
+ d->outputs = kzalloc(alloc_size, GFP_KERNEL);
+ if (!d->outputs) {
+ ret = -ENOMEM;
+ goto deinit_info;
+ }
+
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT);
+ if (ret)
+ goto deinit_info;
+ }
+
+ ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx,
+ &pf->dplls.rclk.num_parents);
+ if (ret)
+ return ret;
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++)
+ pf->dplls.rclk.parent_idx[i] = d->base_rclk_idx + i;
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_RCLK_INPUT);
+ if (ret)
+ return ret;
+ de->mode = DPLL_MODE_AUTOMATIC;
+ dp->mode = DPLL_MODE_AUTOMATIC;
+
+ dev_dbg(ice_pf_to_dev(pf),
+ "%s - success, inputs:%u, outputs:%u rclk-parents:%u\n",
+ __func__, d->num_inputs, d->num_outputs, d->rclk.num_parents);
+
+ return 0;
+
+deinit_info:
+ dev_err(ice_pf_to_dev(pf),
+ "%s - fail: d->inputs:%p, de->input_prio:%p, dp->input_prio:%p, d->outputs:%p\n",
+ __func__, d->inputs, de->input_prio,
+ dp->input_prio, d->outputs);
+ ice_dpll_deinit_info(pf);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit - disable the driver and HW support for the dpll subsystem
+ * @pf: board private structure
+ *
+ * Handle the cleanup work required after dpll initialization: unregister
+ * the dplls and pins, and free all resources used for handling them.
+ *
+ * Context: Destroys pf->dplls.lock mutex. Call only if ICE_FLAG_DPLL was set.
+ */
+void ice_dpll_deinit(struct ice_pf *pf)
+{
+ bool cgu = ice_is_feature_supported(pf, ICE_F_CGU);
+
+ clear_bit(ICE_FLAG_DPLL, pf->flags);
+ if (cgu)
+ ice_dpll_deinit_worker(pf);
+
+ ice_dpll_deinit_pins(pf, cgu);
+ ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu);
+ ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu);
+ ice_dpll_deinit_info(pf);
+ mutex_destroy(&pf->dplls.lock);
+}
+
+/**
+ * ice_dpll_init - initialize support for dpll subsystem
+ * @pf: board private structure
+ *
+ * Set up the device dplls and register them and their connected pins within
+ * the Linux dpll subsystem. This allows userspace to obtain the state of the
+ * DPLLs and to issue DPLL configuration requests.
+ *
+ * Context: Initializes pf->dplls.lock mutex.
+ */
+void ice_dpll_init(struct ice_pf *pf)
+{
+ bool cgu = ice_is_feature_supported(pf, ICE_F_CGU);
+ struct ice_dplls *d = &pf->dplls;
+ int err = 0;
+
+ err = ice_dpll_init_info(pf, cgu);
+ if (err)
+ goto err_exit;
+ err = ice_dpll_init_dpll(pf, &pf->dplls.eec, cgu, DPLL_TYPE_EEC);
+ if (err)
+ goto deinit_info;
+ err = ice_dpll_init_dpll(pf, &pf->dplls.pps, cgu, DPLL_TYPE_PPS);
+ if (err)
+ goto deinit_eec;
+ err = ice_dpll_init_pins(pf, cgu);
+ if (err)
+ goto deinit_pps;
+ mutex_init(&d->lock);
+ if (cgu) {
+ err = ice_dpll_init_worker(pf);
+ if (err)
+ goto deinit_pins;
+ }
+ set_bit(ICE_FLAG_DPLL, pf->flags);
+
+ return;
+
+deinit_pins:
+ ice_dpll_deinit_pins(pf, cgu);
+deinit_pps:
+ ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu);
+deinit_eec:
+ ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu);
+deinit_info:
+ ice_dpll_deinit_info(pf);
+err_exit:
+ mutex_destroy(&d->lock);
+ dev_warn(ice_pf_to_dev(pf), "DPLLs init failure err:%d\n", err);
+}
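
ice_dpll_init() unwinds with the classic goto ladder: each label undoes
exactly the stages that succeeded before the failure, in reverse order.
A runnable miniature of the pattern (the stage numbers are arbitrary):

#include <stdio.h>

static int stage(int n, int fail_at)
{
	if (n == fail_at)
		return -1;
	printf("init %d\n", n);
	return 0;
}

static void undo(int n)
{
	printf("undo %d\n", n);
}

static int init_all(int fail_at)
{
	if (stage(1, fail_at))
		goto err;
	if (stage(2, fail_at))
		goto undo1;
	if (stage(3, fail_at))
		goto undo2;
	return 0;

undo2:
	undo(2);	/* fall through: undo in reverse order */
undo1:
	undo(1);
err:
	return -1;
}

int main(void)
{
	(void)init_all(3);	/* inits 1 and 2, then undoes 2, 1 */
	return 0;
}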
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
new file mode 100644
index 000000000000..2dfe764b81e1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_DPLL_H_
+#define _ICE_DPLL_H_
+
+#include "ice.h"
+
+#define ICE_DPLL_PRIO_MAX 0xF
+#define ICE_DPLL_RCLK_NUM_MAX 4
+
+/**
+ * struct ice_dpll_pin - store info about pins
+ * @pin: dpll pin structure
+ * @pf: pointer to pf, which has registered the dpll_pin
+ * @idx: ice pin private idx
+ * @num_parents: holds number of parent pins
+ * @parent_idx: holds indexes of parent pins
+ * @flags: pin flags returned from HW
+ * @state: state of a pin
+ * @prop: pin properties
+ * @freq: current frequency of a pin
+ */
+struct ice_dpll_pin {
+ struct dpll_pin *pin;
+ struct ice_pf *pf;
+ u8 idx;
+ u8 num_parents;
+ u8 parent_idx[ICE_DPLL_RCLK_NUM_MAX];
+ u8 flags[ICE_DPLL_RCLK_NUM_MAX];
+ u8 state[ICE_DPLL_RCLK_NUM_MAX];
+ struct dpll_pin_properties prop;
+ u32 freq;
+};
+
+/**
+ * struct ice_dpll - store info required for DPLL control
+ * @dpll: pointer to dpll dev
+ * @pf: pointer to pf, which has registered the dpll_device
+ * @dpll_idx: index of dpll on the NIC
+ * @input_idx: currently selected input index
+ * @prev_input_idx: previously selected input index
+ * @ref_state: state of dpll reference signals
+ * @eec_mode: eec_mode dpll is configured for
+ * @phase_shift: phase shift delay of a dpll
+ * @input_prio: priorities of each input
+ * @dpll_state: current dpll sync state
+ * @prev_dpll_state: last dpll sync state
+ * @mode: currently configured mode of a dpll
+ * @active_input: pointer to active input pin
+ * @prev_input: pointer to previous active input pin
+ */
+struct ice_dpll {
+ struct dpll_device *dpll;
+ struct ice_pf *pf;
+ u8 dpll_idx;
+ u8 input_idx;
+ u8 prev_input_idx;
+ u8 ref_state;
+ u8 eec_mode;
+ s64 phase_shift;
+ u8 *input_prio;
+ enum dpll_lock_status dpll_state;
+ enum dpll_lock_status prev_dpll_state;
+ enum dpll_mode mode;
+ struct dpll_pin *active_input;
+ struct dpll_pin *prev_input;
+};
+
+/**
+ * struct ice_dplls - store info required for CCU (clock controlling unit)
+ * @kworker: periodic worker
+ * @work: periodic work
+ * @lock: locks access to configuration of a dpll
+ * @eec: pointer to EEC dpll dev
+ * @pps: pointer to PPS dpll dev
+ * @inputs: input pins pointer
+ * @outputs: output pins pointer
+ * @rclk: recovered pins pointer
+ * @num_inputs: number of input pins available on dpll
+ * @num_outputs: number of output pins available on dpll
+ * @cgu_state_acq_err_num: number of errors returned during periodic work
+ * @base_rclk_idx: idx of first pin used for clock recovery pins
+ * @clock_id: clock_id of dplls
+ * @input_phase_adj_max: maximum phase adjustment of an input pin
+ * @output_phase_adj_max: maximum phase adjustment of an output pin
+ */
+struct ice_dplls {
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work work;
+ struct mutex lock;
+ struct ice_dpll eec;
+ struct ice_dpll pps;
+ struct ice_dpll_pin *inputs;
+ struct ice_dpll_pin *outputs;
+ struct ice_dpll_pin rclk;
+ u8 num_inputs;
+ u8 num_outputs;
+ int cgu_state_acq_err_num;
+ u8 base_rclk_idx;
+ u64 clock_id;
+ s32 input_phase_adj_max;
+ s32 output_phase_adj_max;
+};
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+void ice_dpll_init(struct ice_pf *pf);
+void ice_dpll_deinit(struct ice_pf *pf);
+#else
+static inline void ice_dpll_init(struct ice_pf *pf) { }
+static inline void ice_dpll_deinit(struct ice_pf *pf) { }
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index ad4d4702129f..d3cb08e66dcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3285,7 +3285,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = ice_get_ptp_clock_index(pf);
+ info->phc_index = ice_ptp_clock_index(pf);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 75c9de675f20..c8ea1af51ad3 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -389,6 +389,9 @@ bool ice_gnss_is_gps_present(struct ice_hw *hw)
if (!hw->func_caps.ts_func_info.src_tmr_owned)
return false;
+ if (!ice_is_gps_in_netlist(hw))
+ return false;
+
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
if (ice_is_e810t(hw)) {
int err;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 531cc2194741..6756f3d51d14 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -231,6 +231,7 @@
#define PFINT_SB_CTL 0x0016B600
#define PFINT_SB_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_SB_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_TSYN_MSK 0x0016C980
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
#define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 4f39863b5537..2c96d1883e19 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -430,10 +430,11 @@ static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
u16 vsi_num, u8 tc)
{
- u16 numq, valq, buf_size, num_moved, qbuf_size;
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
- struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
struct ice_hw *new_hw = NULL;
__le32 teid, parent_teid;
@@ -505,26 +506,17 @@ qbuf_none:
goto resume_traffic;
/* Move Vf's VSI node for this TC to newport's scheduler tree */
- buf_size = struct_size(buf, teid, 1);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- dev_warn(dev, "Failure to alloc memory for VF node failover\n");
- goto resume_traffic;
- }
-
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
- if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
- NULL))
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for failover\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
- kfree(buf);
goto resume_traffic;
qbuf_err:
@@ -755,10 +747,11 @@ static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u8 tc)
{
- u16 numq, valq, buf_size, num_moved, qbuf_size;
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
- struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
@@ -820,26 +813,17 @@ reclaim_none:
goto resume_reclaim;
/* Move node to new parent */
- buf_size = struct_size(buf, teid, 1);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- dev_warn(dev, "Failure to alloc memory for VF node failover\n");
- goto resume_reclaim;
- }
-
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
- if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
- NULL))
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
- kfree(buf);
goto resume_reclaim;
reclaim_qerr:
@@ -1792,10 +1776,11 @@ static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 vsi_num, u8 tc)
{
- u16 numq, valq, buf_size, num_moved, qbuf_size;
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
- struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
@@ -1853,26 +1838,17 @@ sync_none:
goto resume_sync;
/* Move node to new parent */
- buf_size = struct_size(buf, teid, 1);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- dev_warn(dev, "Failure to alloc for VF node move in reset rebuild\n");
- goto resume_sync;
- }
-
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
- if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
- NULL))
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
- kfree(buf);
goto resume_sync;
sync_qerr:
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 201570cd2e0b..acc3ffc940e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1832,21 +1832,14 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
- struct ice_aqc_add_tx_qgrp *qg_buf;
- int err;
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
return -EINVAL;
- qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
-
qg_buf->num_txqs = 1;
- err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
- kfree(qg_buf);
- return err;
+ return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
}
/**
@@ -1888,24 +1881,18 @@ setup_rings:
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
- struct ice_aqc_add_tx_qgrp *qg_buf;
- u16 q_idx = 0;
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
int err = 0;
-
- qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
+ u16 q_idx;
qg_buf->num_txqs = 1;
for (q_idx = 0; q_idx < count; q_idx++) {
err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
if (err)
- goto err_cfg_txqs;
+ break;
}
-err_cfg_txqs:
- kfree(qg_buf);
return err;
}
@@ -3985,13 +3972,21 @@ void ice_init_feature_support(struct ice_pf *pf)
case ICE_DEV_ID_E810C_BACKPLANE:
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_BACKPLANE:
+ case ICE_DEV_ID_E810_XXV_QSFP:
+ case ICE_DEV_ID_E810_XXV_SFP:
ice_set_feature_support(pf, ICE_F_DSCP);
- ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
- if (ice_is_e810t(&pf->hw)) {
+ if (ice_is_phy_rclk_in_netlist(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_PHY_RCLK);
+ /* If we don't own the timer - don't enable other caps */
+ if (!ice_pf_src_tmr_owned(pf))
+ break;
+ if (ice_is_cgu_in_netlist(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_CGU);
+ if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
- ice_set_feature_support(pf, ICE_F_GNSS);
- }
+ if (ice_gnss_is_gps_present(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c8286adae946..c726913bc635 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3149,7 +3149,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (!hw->reset_ongoing)
+ if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf))
set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
}
@@ -3159,7 +3159,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
- if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ if (ice_pf_src_tmr_owned(pf)) {
/* Save EVENTs from GLTSYN register */
pf->ptp.ext_ts_irq |= gltsyn_stat &
(GLTSYN_STAT_EVENT0_M |
@@ -4665,6 +4665,10 @@ static void ice_init_features(struct ice_pf *pf)
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_init(pf);
+ if (ice_is_feature_supported(pf, ICE_F_CGU) ||
+ ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
+ ice_dpll_init(pf);
+
/* Note: Flow director init failure is non-fatal to load */
if (ice_init_fdir(pf))
dev_err(dev, "could not initialize flow director\n");
@@ -4691,6 +4695,8 @@ static void ice_deinit_features(struct ice_pf *pf)
ice_gnss_exit(pf);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_release(pf);
+ if (test_bit(ICE_FLAG_DPLL, pf->flags))
+ ice_dpll_deinit(pf);
}
static void ice_init_wakeup(struct ice_pf *pf)
@@ -7369,8 +7375,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
}
/* configure PTP timestamping after VSI rebuild */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_cfg_timestamp(pf, false);
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) {
+ if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
+ ice_ptp_cfg_timestamp(pf, false);
+ else if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL)
+ /* for E82x PHC owner always need to have interrupts */
+ ice_ptp_cfg_timestamp(pf, true);
+ }
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
if (err) {
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 81d96a40d5a7..5293df2d57a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -256,6 +256,24 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
}
/**
+ * ice_ptp_configure_tx_tstamp - Enable or disable Tx timestamp interrupt
+ * @pf: The PF pointer to search in
+ * @on: bool value for whether timestamp interrupt is enabled or disabled
+ */
+static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on)
+{
+ u32 val;
+
+ /* Configure the Tx timestamp interrupt */
+ val = rd32(&pf->hw, PFINT_OICR_ENA);
+ if (on)
+ val |= PFINT_OICR_TSYN_TX_M;
+ else
+ val &= ~PFINT_OICR_TSYN_TX_M;
+ wr32(&pf->hw, PFINT_OICR_ENA, val);
+}
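
The helper above is a plain read-modify-write on the interrupt-enable
register: only the Tx-timestamp bit changes and every other enable bit
survives. An isolated sketch of the same pattern (the register value and
bit position are invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define TSYN_TX_M (1u << 11)	/* hypothetical bit position */

static uint32_t fake_reg = 0x000000f0;	/* other enables already set */

static void configure_bit(int on)
{
	uint32_t val = fake_reg;	/* rd32() */

	if (on)
		val |= TSYN_TX_M;	/* set only our bit */
	else
		val &= ~TSYN_TX_M;	/* clear only our bit */
	fake_reg = val;			/* wr32() */
}

int main(void)
{
	configure_bit(1);
	printf("reg=0x%08x\n", fake_reg);	/* 0x000008f0 */
	configure_bit(0);
	printf("reg=0x%08x\n", fake_reg);	/* 0x000000f0 */
	return 0;
}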
+
+/**
* ice_set_tx_tstamp - Enable or disable Tx timestamping
* @pf: The PF pointer to search in
* @on: bool value for whether timestamps are enabled or disabled
@@ -263,7 +281,6 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
{
struct ice_vsi *vsi;
- u32 val;
u16 i;
vsi = ice_get_main_vsi(pf);
@@ -277,13 +294,8 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
vsi->tx_rings[i]->ptp_tx = on;
}
- /* Configure the Tx timestamp interrupt */
- val = rd32(&pf->hw, PFINT_OICR_ENA);
- if (on)
- val |= PFINT_OICR_TSYN_TX_M;
- else
- val &= ~PFINT_OICR_TSYN_TX_M;
- wr32(&pf->hw, PFINT_OICR_ENA, val);
+ if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
+ ice_ptp_configure_tx_tstamp(pf, on);
pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}
@@ -328,131 +340,6 @@ void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
}
/**
- * ice_get_ptp_clock_index - Get the PTP clock index
- * @pf: the PF pointer
- *
- * Determine the clock index of the PTP clock associated with this device. If
- * this is the PF controlling the clock, just use the local access to the
- * clock device pointer.
- *
- * Otherwise, read from the driver shared parameters to determine the clock
- * index value.
- *
- * Returns: the index of the PTP clock associated with this device, or -1 if
- * there is no associated clock.
- */
-int ice_get_ptp_clock_index(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- enum ice_aqc_driver_params param_idx;
- struct ice_hw *hw = &pf->hw;
- u8 tmr_idx;
- u32 value;
- int err;
-
- /* Use the ptp_clock structure if we're the main PF */
- if (pf->ptp.clock)
- return ptp_clock_index(pf->ptp.clock);
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- if (!tmr_idx)
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
- else
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
-
- err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
- if (err) {
- dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
- return -1;
- }
-
- /* The PTP clock index is an integer, and will be between 0 and
- * INT_MAX. The highest bit of the driver shared parameter is used to
- * indicate whether or not the currently stored clock index is valid.
- */
- if (!(value & PTP_SHARED_CLK_IDX_VALID))
- return -1;
-
- return value & ~PTP_SHARED_CLK_IDX_VALID;
-}
-
-/**
- * ice_set_ptp_clock_index - Set the PTP clock index
- * @pf: the PF pointer
- *
- * Set the PTP clock index for this device into the shared driver parameters,
- * so that other PFs associated with this device can read it.
- *
- * If the PF is unable to store the clock index, it will log an error, but
- * will continue operating PTP.
- */
-static void ice_set_ptp_clock_index(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- enum ice_aqc_driver_params param_idx;
- struct ice_hw *hw = &pf->hw;
- u8 tmr_idx;
- u32 value;
- int err;
-
- if (!pf->ptp.clock)
- return;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- if (!tmr_idx)
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
- else
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
-
- value = (u32)ptp_clock_index(pf->ptp.clock);
- if (value > INT_MAX) {
- dev_err(dev, "PTP Clock index is too large to store\n");
- return;
- }
- value |= PTP_SHARED_CLK_IDX_VALID;
-
- err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
- if (err) {
- dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
- }
-}
-
-/**
- * ice_clear_ptp_clock_index - Clear the PTP clock index
- * @pf: the PF pointer
- *
- * Clear the PTP clock index for this device. Must be called when
- * unregistering the PTP clock, in order to ensure other PFs stop reporting
- * a clock object that no longer exists.
- */
-static void ice_clear_ptp_clock_index(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- enum ice_aqc_driver_params param_idx;
- struct ice_hw *hw = &pf->hw;
- u8 tmr_idx;
- int err;
-
- /* Do not clear the index if we don't own the timer */
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- if (!tmr_idx)
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
- else
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
-
- err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
- if (err) {
- dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
- }
-}
-
-/**
* ice_ptp_read_src_clk_reg - Read the source clock register
* @pf: Board private structure
* @sts: Optional parameter for holding a pair of system timestamps from
@@ -674,9 +561,6 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
int err;
u8 idx;
- if (!tx->init)
- return;
-
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
hw = &pf->hw;
@@ -775,6 +659,39 @@ skip_ts_read:
}
/**
+ * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
+ * @pf: Board private structure
+ */
+static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
+{
+ struct ice_ptp_port *port;
+ unsigned int i;
+
+ mutex_lock(&pf->ptp.ports_owner.lock);
+ list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
+ struct ice_ptp_tx *tx = &port->tx;
+
+ if (!tx || !tx->init)
+ continue;
+
+ ice_ptp_process_tx_tstamp(tx);
+ }
+ mutex_unlock(&pf->ptp.ports_owner.lock);
+
+ for (i = 0; i < ICE_MAX_QUAD; i++) {
+ u64 tstamp_ready;
+ int err;
+
+ /* Read the Tx ready status first */
+ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
+ if (err || tstamp_ready)
+ return ICE_TX_TSTAMP_WORK_PENDING;
+ }
+
+ return ICE_TX_TSTAMP_WORK_DONE;
+}
+
+/**
* ice_ptp_tx_tstamp - Process Tx timestamps for this function.
+ * @tx: Tx tracking structure to process
*
@@ -1366,6 +1283,7 @@ out_unlock:
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
struct ice_ptp_port *ptp_port;
+ struct ice_hw *hw = &pf->hw;
if (!test_bit(ICE_FLAG_PTP, pf->flags))
return;
@@ -1380,11 +1298,16 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
- /* E810 devices do not need to reconfigure the PHY */
- if (ice_is_e810(&pf->hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
+ /* Do not reconfigure E810 PHY */
return;
-
- ice_ptp_port_phy_restart(ptp_port);
+ case ICE_PHY_E822:
+ ice_ptp_port_phy_restart(ptp_port);
+ return;
+ default:
+ dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
+ }
}
/**
@@ -1441,6 +1364,24 @@ static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
}
/**
+ * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
+ * @pf: Board private structure
+ */
+static void ice_ptp_restart_all_phy(struct ice_pf *pf)
+{
+ struct list_head *entry;
+
+ list_for_each(entry, &pf->ptp.ports_owner.ports) {
+ struct ice_ptp_port *port = list_entry(entry,
+ struct ice_ptp_port,
+ list_member);
+
+ if (port->link_up)
+ ice_ptp_port_phy_restart(port);
+ }
+}
+
+/**
* ice_ptp_adjfine - Adjust clock increment rate
* @info: the driver's PTP info structure
* @scaled_ppm: Parts per million with 16-bit fractional field
@@ -1877,9 +1818,9 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
- /* Recalibrate and re-enable timestamp block */
- if (pf->ptp.port.link_up)
- ice_ptp_port_phy_restart(&pf->ptp.port);
+ /* Recalibrate and re-enable timestamp blocks for E822/E823 */
+ if (hw->phy_model == ICE_PHY_E822)
+ ice_ptp_restart_all_phy(pf);
exit:
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
@@ -1976,21 +1917,32 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
u32 hh_lock, hh_art_ctl;
int i;
- /* Get the HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+#define MAX_HH_HW_LOCK_TRIES 5
+#define MAX_HH_CTL_LOCK_TRIES 100
+
+ for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
+ /* Get the HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ if (hh_lock & PFHH_SEM_BUSY_M) {
+ usleep_range(10000, 15000);
+ continue;
+ }
+ break;
+ }
if (hh_lock & PFHH_SEM_BUSY_M) {
dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
- return -EFAULT;
+ return -EBUSY;
}
+ /* Program cmd to master timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
+
/* Start the ART and device clock sync sequence */
hh_art_ctl = rd32(hw, GLHH_ART_CTL);
hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
wr32(hw, GLHH_ART_CTL, hh_art_ctl);
-#define MAX_HH_LOCK_TRIES 100
-
- for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
+ for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
/* Wait for sync to complete */
hh_art_ctl = rd32(hw, GLHH_ART_CTL);
if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
@@ -2014,19 +1966,23 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
break;
}
}
+
+ /* Clear the master timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
/* Release HW lock */
hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
- if (i == MAX_HH_LOCK_TRIES)
+ if (i == MAX_HH_CTL_LOCK_TRIES)
return -ETIMEDOUT;
return 0;
}
/**
- * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
+ * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@@ -2034,14 +1990,14 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
- * This is only valid for E822 devices which have support for generating the
- * cross timestamp via PCIe PTM.
+ * This is only valid for E822 and E823 devices which have support for
+ * generating the cross timestamp via PCIe PTM.
*
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
*/
static int
-ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
+ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
@@ -2246,18 +2202,20 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
static void
ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
- info->n_per_out = N_PER_OUT_E810;
-
- if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
- info->n_ext_ts = N_EXT_TS_E810;
-
if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
info->n_ext_ts = N_EXT_TS_E810;
+ info->n_per_out = N_PER_OUT_E810T;
info->n_pins = NUM_PTP_PINS_E810T;
info->verify = ice_verify_pin_e810t;
/* Complete setup of the SMA pins */
ice_ptp_setup_sma_pins_e810t(pf, info);
+ } else if (ice_is_e810t(&pf->hw)) {
+ info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
+ info->n_per_out = N_PER_OUT_NO_SMA_E810T;
+ } else {
+ info->n_per_out = N_PER_OUT_E810;
+ info->n_ext_ts = N_EXT_TS_E810;
}
}
@@ -2275,22 +2233,22 @@ ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
- * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
+ * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
* @pf: Board private structure
* @info: PTP info to fill
*
- * Assign functions to the PTP capabiltiies structure for E822 devices.
+ * Assign functions to the PTP capabilities structure for E82x devices.
* Functions which operate across all device families should be set directly
- * in ice_ptp_set_caps. Only add functions here which are distinct for E822
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
* devices.
*/
static void
-ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
+ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
{
#ifdef CONFIG_ICE_HWTS
if (boot_cpu_has(X86_FEATURE_ART) &&
boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- info->getcrosststamp = ice_ptp_getcrosststamp_e822;
+ info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
#endif /* CONFIG_ICE_HWTS */
}
@@ -2324,6 +2282,8 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
static void
ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
{
+ ice_ptp_set_funcs_e82x(pf, info);
+
info->enable = ice_ptp_gpio_enable_e823;
ice_ptp_setup_pins_e823(pf, info);
}
@@ -2351,7 +2311,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
else if (ice_is_e823(&pf->hw))
ice_ptp_set_funcs_e823(pf, info);
else
- ice_ptp_set_funcs_e822(pf, info);
+ ice_ptp_set_funcs_e82x(pf, info);
}
/**
@@ -2366,7 +2326,6 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
static long ice_ptp_create_clock(struct ice_pf *pf)
{
struct ptp_clock_info *info;
- struct ptp_clock *clock;
struct device *dev;
/* No need to create a clock device if we already have one */
@@ -2379,11 +2338,11 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
dev = ice_pf_to_dev(pf);
/* Attempt to register the clock before enabling the hardware. */
- clock = ptp_clock_register(info, dev);
- if (IS_ERR(clock))
- return PTR_ERR(clock);
-
- pf->ptp.clock = clock;
+ pf->ptp.clock = ptp_clock_register(info, dev);
+ if (IS_ERR(pf->ptp.clock)) {
+ dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
+ return PTR_ERR(pf->ptp.clock);
+ }
return 0;
}
@@ -2440,7 +2399,21 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
*/
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
{
- return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
+ switch (pf->ptp.tx_interrupt_mode) {
+ case ICE_PTP_TX_INTERRUPT_NONE:
+ /* This device has the clock owner handle timestamps for it */
+ return ICE_TX_TSTAMP_WORK_DONE;
+ case ICE_PTP_TX_INTERRUPT_SELF:
+ /* This device handles its own timestamps */
+ return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
+ case ICE_PTP_TX_INTERRUPT_ALL:
+ /* This device handles timestamps for all ports */
+ return ice_ptp_tx_tstamp_owner(pf);
+ default:
+ WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
+ pf->ptp.tx_interrupt_mode);
+ return ICE_TX_TSTAMP_WORK_DONE;
+ }
}
static void ice_ptp_periodic_work(struct kthread_work *work)
@@ -2474,7 +2447,7 @@ void ice_ptp_reset(struct ice_pf *pf)
if (test_bit(ICE_PFR_REQ, pf->state))
goto pfr;
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ if (!ice_pf_src_tmr_owned(pf))
goto reset_ts;
err = ice_ptp_init_phc(hw);
@@ -2550,6 +2523,209 @@ err:
}
/**
+ * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
+ * @aux_dev: auxiliary device to get the auxiliary PF for
+ */
+static struct ice_pf *
+ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
+{
+ struct ice_ptp_port *aux_port;
+ struct ice_ptp *aux_ptp;
+
+ aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
+ aux_ptp = container_of(aux_port, struct ice_ptp, port);
+
+ return container_of(aux_ptp, struct ice_pf, ptp);
+}
+
+/**
+ * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
+ * @aux_dev: auxiliary device to get the PF for
+ */
+static struct ice_pf *
+ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
+{
+ struct ice_ptp_port_owner *ports_owner;
+ struct auxiliary_driver *aux_drv;
+ struct ice_ptp *owner_ptp;
+
+ if (!aux_dev->dev.driver)
+ return NULL;
+
+ aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
+ ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
+ aux_driver);
+ owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
+ return container_of(owner_ptp, struct ice_pf, ptp);
+}
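
Both helpers above climb container_of() chains from an embedded member
back to the enclosing ice_pf. A userspace demonstration of the same
pointer arithmetic, with simplified stand-in structures:

#include <stddef.h>
#include <stdio.h>

/* simplified userspace container_of(); the kernel's adds type checks */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ptp_port { int port_num; };
struct ptp_info { struct ptp_port port; };
struct pf { int id; struct ptp_info ptp; };

int main(void)
{
	struct pf board = { .id = 7 };
	struct ptp_port *port = &board.ptp.port;

	/* walk back out: port -> ptp_info -> pf */
	struct ptp_info *info = container_of(port, struct ptp_info, port);
	struct pf *owner = container_of(info, struct pf, ptp);

	printf("recovered pf id: %d\n", owner->id);	/* 7 */
	return 0;
}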
+
+/**
+ * ice_ptp_auxbus_probe - Probe auxiliary devices
+ * @aux_dev: PF's auxiliary device
+ * @id: Auxiliary device ID
+ */
+static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+{
+ struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
+ struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
+
+ if (WARN_ON(!owner_pf))
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
+ mutex_lock(&owner_pf->ptp.ports_owner.lock);
+ list_add(&aux_pf->ptp.port.list_member,
+ &owner_pf->ptp.ports_owner.ports);
+ mutex_unlock(&owner_pf->ptp.ports_owner.lock);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
+ * @aux_dev: PF's auxiliary device
+ */
+static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
+{
+ struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
+ struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
+
+ mutex_lock(&owner_pf->ptp.ports_owner.lock);
+ list_del(&aux_pf->ptp.port.list_member);
+ mutex_unlock(&owner_pf->ptp.ports_owner.lock);
+}
+
+/**
+ * ice_ptp_auxbus_shutdown - Shutdown callback for the auxiliary bus driver
+ * @aux_dev: PF's auxiliary device
+ */
+static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
+{
+ /* Doing nothing here, but the auxbus driver requires this callback */
+}
+
+/**
+ * ice_ptp_auxbus_suspend - Suspend callback for the auxiliary bus driver
+ * @aux_dev: PF's auxiliary device
+ * @state: power management state indicator
+ */
+static int
+ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
+{
+ /* Doing nothing here, but the auxbus driver requires this callback */
+ return 0;
+}
+
+/**
+ * ice_ptp_auxbus_resume - Resume callback for the auxiliary bus driver
+ * @aux_dev: PF's auxiliary device
+ */
+static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
+{
+ /* Doing nothing here, but the auxbus driver requires this callback */
+ return 0;
+}
+
+/**
+ * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
+ * @pf: Board private structure
+ * @name: auxiliary bus driver name
+ */
+static struct auxiliary_device_id *
+ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
+{
+ struct auxiliary_device_id *ids;
+
+ /* Second id left empty to terminate the array */
+ ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
+ sizeof(struct auxiliary_device_id), GFP_KERNEL);
+ if (!ids)
+ return NULL;
+
+ snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
+
+ return ids;
+}
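
Auxiliary-bus id tables are zero-terminated arrays, and a device binds
when its "<module>.<name>" string matches an entry, which is why the
helper allocates two entries and formats "ice.%s" into the first. A
plain-C illustration of that matching (toy types, not the kernel's):

#include <stdio.h>
#include <string.h>

struct toy_id { char name[32]; };

static struct toy_id ids[2];	/* zero-initialized; [1] terminates */

static int match(const struct toy_id *id, const char *dev_name)
{
	for (; id->name[0]; id++)	/* stop at the zeroed terminator */
		if (!strcmp(id->name, dev_name))
			return 1;
	return 0;
}

int main(void)
{
	snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s",
		 "ptp_aux_dev_0_0_clk0");
	printf("%d\n", match(ids, "ice.ptp_aux_dev_0_0_clk0"));	/* 1 */
	printf("%d\n", match(ids, "ice.other"));			/* 0 */
	return 0;
}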
+
+/**
+ * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
+ * @pf: Board private structure
+ */
+static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
+{
+ struct auxiliary_driver *aux_driver;
+ struct ice_ptp *ptp;
+ struct device *dev;
+ char *name;
+ int err;
+
+ ptp = &pf->ptp;
+ dev = ice_pf_to_dev(pf);
+ aux_driver = &ptp->ports_owner.aux_driver;
+ INIT_LIST_HEAD(&ptp->ports_owner.ports);
+ mutex_init(&ptp->ports_owner.lock);
+ name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
+ pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
+ ice_get_ptp_src_clock_index(&pf->hw));
+
+ aux_driver->name = name;
+ aux_driver->shutdown = ice_ptp_auxbus_shutdown;
+ aux_driver->suspend = ice_ptp_auxbus_suspend;
+ aux_driver->remove = ice_ptp_auxbus_remove;
+ aux_driver->resume = ice_ptp_auxbus_resume;
+ aux_driver->probe = ice_ptp_auxbus_probe;
+ aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
+ if (!aux_driver->id_table)
+ return -ENOMEM;
+
+ err = auxiliary_driver_register(aux_driver);
+ if (err) {
+ devm_kfree(dev, aux_driver->id_table);
+ dev_err(dev, "Failed registering aux_driver, name <%s>\n",
+ name);
+ }
+
+ return err;
+}
+
+/**
+ * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
+ * @pf: Board private structure
+ */
+static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
+{
+ struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
+
+ auxiliary_driver_unregister(aux_driver);
+ devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
+
+ mutex_destroy(&pf->ptp.ports_owner.lock);
+}
+
+/**
+ * ice_ptp_clock_index - Get the PTP clock index for this device
+ * @pf: Board private structure
+ *
+ * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
+ * is associated.
+ */
+int ice_ptp_clock_index(struct ice_pf *pf)
+{
+ struct auxiliary_device *aux_dev;
+ struct ice_pf *owner_pf;
+ struct ptp_clock *clock;
+
+ aux_dev = &pf->ptp.port.aux_dev;
+ owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
+ if (!owner_pf)
+ return -1;
+ clock = owner_pf->ptp.clock;
+
+ return clock ? ptp_clock_index(clock) : -1;
+}
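+
+/* Userspace consumes this index as the PHC character device; e.g. an
+ * index of 3 (illustrative) corresponds to /dev/ptp3, which tools such
+ * as phc2sys or testptp can then open.
+ */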
+
+/**
* ice_ptp_prepare_for_reset - Prepare PTP for reset
* @pf: Board private structure
*/
@@ -2627,7 +2803,15 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
- if (!ice_is_e810(hw)) {
+ if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) {
+ /* The clock owner for this device type handles the timestamp
+ * interrupt for all ports.
+ */
+ ice_ptp_configure_tx_tstamp(pf, true);
+
+ /* React to interrupts across all quads on E82x */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
+
/* Enable quad interrupts */
err = ice_ptp_tx_ena_intr(pf, true, itr);
if (err)
@@ -2639,11 +2823,15 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
if (err)
goto err_clk;
- /* Store the PTP clock index for other PFs */
- ice_set_ptp_clock_index(pf);
+ err = ice_ptp_register_auxbus_driver(pf);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver\n");
+ goto err_aux;
+ }
return 0;
-
+err_aux:
+ ptp_clock_unregister(pf->ptp.clock);
err_clk:
pf->ptp.clock = NULL;
err_exit:
@@ -2685,14 +2873,124 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
+ struct ice_hw *hw = &pf->hw;
+
mutex_init(&ptp_port->ps_lock);
- if (ice_is_e810(&pf->hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
+ case ICE_PHY_E822:
+ /* Non-owner PFs don't react to any interrupts on E82x,
+ * neither on their own quad nor on others
+ */
+ if (!ice_ptp_pf_handles_tx_interrupt(pf)) {
+ ice_ptp_configure_tx_tstamp(pf, false);
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
+ }
+ kthread_init_delayed_work(&ptp_port->ov_work,
+ ice_ptp_wait_for_offsets);
+
+ return ice_ptp_init_tx_e822(pf, &ptp_port->tx,
+ ptp_port->port_num);
+ default:
+ return -ENODEV;
+ }
+}
+
+/**
+ * ice_ptp_release_auxbus_device - Release a PF's auxiliary device
+ * @dev: device that utilizes the auxbus
+ */
+static void ice_ptp_release_auxbus_device(struct device *dev)
+{
+ /* Nothing to do here, but the auxiliary device requires this callback */
+}
+
+/**
+ * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
+ * @pf: Board private structure
+ */
+static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
+{
+ struct auxiliary_device *aux_dev;
+ struct ice_ptp *ptp;
+ struct device *dev;
+ char *name;
+ int err;
+ u32 id;
- kthread_init_delayed_work(&ptp_port->ov_work,
- ice_ptp_wait_for_offsets);
- return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
+ ptp = &pf->ptp;
+ id = ptp->port.port_num;
+ dev = ice_pf_to_dev(pf);
+
+ aux_dev = &ptp->port.aux_dev;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
+ pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
+ ice_get_ptp_src_clock_index(&pf->hw));
+
+ aux_dev->name = name;
+ aux_dev->id = id;
+ aux_dev->dev.release = ice_ptp_release_auxbus_device;
+ aux_dev->dev.parent = dev;
+
+ err = auxiliary_device_init(aux_dev);
+ if (err)
+ goto aux_err;
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ auxiliary_device_uninit(aux_dev);
+ goto aux_err;
+ }
+
+ return 0;
+aux_err:
+ dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
+ devm_kfree(dev, name);
+ return err;
+}
+
+/**
+ * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
+ * @pf: Board private structure
+ */
+static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
+{
+ struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
+
+ auxiliary_device_delete(aux_dev);
+ auxiliary_device_uninit(aux_dev);
+
+ memset(aux_dev, 0, sizeof(*aux_dev));
+}
+
+/**
+ * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
+ * @pf: Board private structure
+ *
+ * Initialize the Tx timestamp interrupt mode for this device. For most device
+ * types, each PF processes the interrupt and manages its own timestamps. For
+ * E822-based devices, only the clock owner processes the timestamps. Other
+ * PFs disable the interrupt and do not process their own timestamps.
+ */
+static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
+{
+ switch (pf->hw.phy_model) {
+ case ICE_PHY_E822:
+ /* On E822-based PHYs the clock owner processes the
+ * interrupt for all ports.
+ */
+ if (ice_pf_src_tmr_owned(pf))
+ pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
+ else
+ pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
+ break;
+ default:
+ /* other PHY types handle their own Tx interrupt */
+ pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
+ }
}
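+
+/* Summarizing the mapping above:
+ *
+ * ICE_PTP_TX_INTERRUPT_SELF - each PF services its own Tx timestamp
+ *	interrupt (E810 and other self-contained PHYs)
+ * ICE_PTP_TX_INTERRUPT_ALL - the clock owner services the interrupt for
+ *	every port (E822 owner PF)
+ * ICE_PTP_TX_INTERRUPT_NONE - the PF ignores the interrupt entirely
+ *	(E822 non-owner PFs)
+ */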
/**
@@ -2713,10 +3011,14 @@ void ice_ptp_init(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int err;
+ ice_ptp_init_phy_model(hw);
+
+ ice_ptp_init_tx_interrupt_mode(pf);
+
/* If this function owns the clock hardware, it must allocate and
* configure the PTP clock device to represent it.
*/
- if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ if (ice_pf_src_tmr_owned(pf)) {
err = ice_ptp_init_owner(pf);
if (err)
goto err;
@@ -2735,6 +3037,10 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err;
+ err = ice_ptp_create_auxbus_device(pf);
+ if (err)
+ goto err;
+
dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
@@ -2763,6 +3069,8 @@ void ice_ptp_release(struct ice_pf *pf)
/* Disable timestamping for both Tx and Rx */
ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_remove_auxbus_device(pf);
+
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
clear_bit(ICE_FLAG_PTP, pf->flags);
@@ -2782,9 +3090,10 @@ void ice_ptp_release(struct ice_pf *pf)
/* Disable periodic outputs */
ice_ptp_disable_all_clkout(pf);
- ice_clear_ptp_clock_index(pf);
ptp_clock_unregister(pf->ptp.clock);
pf->ptp.clock = NULL;
+ ice_ptp_unregister_auxbus_driver(pf);
+
dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 995a57019ba7..8f6f94392756 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -157,7 +157,9 @@ struct ice_ptp_tx {
* ready for PTP functionality. It is used to track the port initialization
* and determine when the port's PHY offset is valid.
*
+ * @list_member: list member structure of auxiliary device
* @tx: Tx timestamp tracking for this port
+ * @aux_dev: auxiliary device associated with this port
* @ov_work: delayed work task for tracking when PHY offset is valid
* @ps_lock: mutex used to protect the overall PTP PHY start procedure
* @link_up: indicates whether the link is up
@@ -165,7 +167,9 @@ struct ice_ptp_tx {
* @port_num: the port number this structure represents
*/
struct ice_ptp_port {
+ struct list_head list_member;
struct ice_ptp_tx tx;
+ struct auxiliary_device aux_dev;
struct kthread_delayed_work ov_work;
struct mutex ps_lock; /* protects overall PTP PHY start procedure */
bool link_up;
@@ -173,11 +177,35 @@ struct ice_ptp_port {
u8 port_num;
};
+enum ice_ptp_tx_interrupt {
+ ICE_PTP_TX_INTERRUPT_NONE = 0,
+ ICE_PTP_TX_INTERRUPT_SELF,
+ ICE_PTP_TX_INTERRUPT_ALL,
+};
+
+/**
+ * struct ice_ptp_port_owner - data used to handle the PTP clock owner info
+ *
+ * This structure contains data necessary for the PTP clock owner to correctly
+ * handle the timestamping feature for all attached ports.
+ *
+ * @aux_driver: the structure carrying the auxiliary driver information
+ * @ports: list of ports handled by this port owner
+ * @lock: protects access to the ports list
+ */
+struct ice_ptp_port_owner {
+ struct auxiliary_driver aux_driver;
+ struct list_head ports;
+ struct mutex lock;
+};
+
#define GLTSYN_TGT_H_IDX_MAX 4
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @tx_interrupt_mode: the TX interrupt mode for the PTP clock
* @port: data for the PHY port initialization procedure
+ * @ports_owner: data for the auxiliary driver owner
* @work: delayed work function for periodic tasks
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
* @cached_phc_jiffies: jiffies when cached_phc_time was last updated
@@ -197,7 +225,9 @@ struct ice_ptp_port {
* @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
+ enum ice_ptp_tx_interrupt tx_interrupt_mode;
struct ice_ptp_port port;
+ struct ice_ptp_port_owner ports_owner;
struct kthread_delayed_work work;
u64 cached_phc_time;
unsigned long cached_phc_jiffies;
@@ -258,11 +288,11 @@ struct ice_ptp {
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
-int ice_get_ptp_clock_index(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
@@ -288,10 +318,6 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
}
static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
-static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
-{
- return -1;
-}
static inline void ice_ptp_extts_event(struct ice_pf *pf) { }
static inline s8
@@ -314,5 +340,10 @@ static inline void ice_ptp_release(struct ice_pf *pf) { }
static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
}
+
+static inline int ice_ptp_clock_index(struct ice_pf *pf)
+{
+ return -1;
+}
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
#endif /* _ICE_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index f818dd215c05..de16cf14c4b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -7,6 +7,132 @@
#include "ice_ptp_consts.h"
#include "ice_cgu_regs.h"
+static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
+ DPLL_PIN_FREQUENCY_1PPS,
+ DPLL_PIN_FREQUENCY_10MHZ,
+};
+
+static struct dpll_pin_frequency ice_cgu_pin_freq_1_hz[] = {
+ DPLL_PIN_FREQUENCY_1PPS,
+};
+
+static struct dpll_pin_frequency ice_cgu_pin_freq_10_mhz[] = {
+ DPLL_PIN_FREQUENCY_10MHZ,
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = {
+ { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0, },
+ { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0, },
+ { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, },
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
+ { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, },
+ { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, },
+ { "C827_1-RCLKA", ZL_REF2P, DPLL_PIN_TYPE_MUX, },
+ { "C827_1-RCLKB", ZL_REF2N, DPLL_PIN_TYPE_MUX, },
+ { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, },
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = {
+ { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
+ { "MAC-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
+ { "CVL-SDP21", ZL_OUT4, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "CVL-SDP23", ZL_OUT5, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_outputs[] = {
+ { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "PHY2-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "MAC-CLK", ZL_OUT4, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "CVL-SDP21", ZL_OUT5, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "CVL-SDP23", ZL_OUT6, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = {
+ { "NONE", SI_REF0P, 0, 0 },
+ { "NONE", SI_REF0N, 0, 0 },
+ { "SYNCE0_DP", SI_REF1P, DPLL_PIN_TYPE_MUX, 0 },
+ { "SYNCE0_DN", SI_REF1N, DPLL_PIN_TYPE_MUX, 0 },
+ { "EXT_CLK_SYNC", SI_REF2P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "NONE", SI_REF2N, 0, 0 },
+ { "EXT_PPS_OUT", SI_REF3, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "INT_PPS_OUT", SI_REF4, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_si_cgu_outputs[] = {
+ { "1588-TIME_SYNC", SI_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "PHY-CLK", SI_OUT1, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "10MHZ-SMA2", SI_OUT2, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
+ { "PPS-SMA1", SI_OUT3, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = {
+ { "NONE", ZL_REF0P, 0, 0 },
+ { "INT_PPS_OUT", ZL_REF0N, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "SYNCE0_DP", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0 },
+ { "SYNCE0_DN", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0 },
+ { "NONE", ZL_REF2P, 0, 0 },
+ { "NONE", ZL_REF2N, 0, 0 },
+ { "EXT_CLK_SYNC", ZL_REF3P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "NONE", ZL_REF3N, 0, 0 },
+ { "EXT_PPS_OUT", ZL_REF4P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0 },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
+ { "PPS-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "10MHZ-SMA2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
+ { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "1588-TIME_REF", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "CPK-TIME_SYNC", ZL_OUT4, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "NONE", ZL_OUT5, 0, 0 },
+};
+
/* Low level functions for interacting with and managing the device clock used
* for the Precision Time Protocol.
*
@@ -107,7 +233,7 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
*
* Prepare the source timer for an upcoming timer sync command.
*/
-static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val;
u8 tmr_idx;
@@ -116,19 +242,19 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
cmd_val = tmr_idx << SEL_CPK_SRC;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val |= GLTSYN_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val |= GLTSYN_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val |= GLTSYN_CMD_ADJ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val |= GLTSYN_CMD_READ_TIME;
break;
case ICE_PTP_NOP:
@@ -168,9 +294,9 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY;
- phy = port / ICE_PORTS_PER_PHY;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
+ phy_port = port % ICE_PORTS_PER_PHY_E822;
+ phy = port / ICE_PORTS_PER_PHY_E822;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822;
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -495,20 +621,25 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* Fill a message buffer for accessing a register in a quad shared between
* multiple PHYs.
*/
-static void
+static int
ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
{
u32 addr;
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
msg->dest_dev = rmn_0;
- if ((quad % ICE_NUM_QUAD_TYPE) == 0)
+ if ((quad % ICE_QUADS_PER_PHY_E822) == 0)
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
msg->msg_addr_low = lower_16_bits(addr);
msg->msg_addr_high = upper_16_bits(addr);
+
+ return 0;
}
/**
@@ -527,10 +658,10 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- if (quad >= ICE_MAX_QUAD)
- return -EINVAL;
+ err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ if (err)
+ return err;
- ice_fill_quad_msg_e822(&msg, quad, offset);
msg.opcode = ice_sbq_msg_rd;
err = ice_sbq_rw_reg(hw, &msg);
@@ -561,10 +692,10 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- if (quad >= ICE_MAX_QUAD)
- return -EINVAL;
+ err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ if (err)
+ return err;
- ice_fill_quad_msg_e822(&msg, quad, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
@@ -628,29 +759,32 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* @quad: the quad to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
- * shared between the internal PHYs on the E822 devices.
+ * Read the timestamp out of the quad to clear its timestamp status bit from
+ * the PHY quad block that is shared between the internal PHYs of the E822
+ * devices.
+ *
+ * Note that unlike E810, software cannot directly write to the quad memory
+ * bank registers. E822 relies on the ice_get_phy_tx_tstamp_ready() function
+ * to determine which timestamps are valid. Reading a timestamp auto-clears
+ * the valid bit.
+ *
+ * To directly clear the contents of the timestamp block entirely, discarding
+ * all timestamp data at once, software should instead use
+ * ice_ptp_reset_ts_memory_quad_e822().
+ *
+ * This function should only be called on an idx whose bit is set according to
+ * ice_get_phy_tx_tstamp_ready().
*/
static int
ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
{
- u16 lo_addr, hi_addr;
+ u64 unused_tstamp;
int err;
- lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
- hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
-
- err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
+ err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp);
if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
- err);
- return err;
- }
-
- err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
- err);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
+ quad, idx, err);
return err;
}
@@ -1025,7 +1159,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
* @time: Time to initialize the PHY port clocks to
*
* Program the PHY port registers with a new initial time value. The port
- * clock will be initialized once the driver issues an INIT_TIME sync
+ * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
* command. The time value is the upper 32 bits of the PHY timer, usually in
* units of nominal nanoseconds.
*/
@@ -1074,7 +1208,7 @@ exit_err:
*
* Program the port for an atomic adjustment by writing the Tx and Rx timer
* registers. The atomic adjustment won't be completed until the driver issues
- * an ADJ_TIME command.
+ * an ICE_PTP_ADJ_TIME command.
*
* Note that time is not in units of nanoseconds. It is in clock time
* including the lower sub-nanosecond portion of the port timer.
@@ -1127,7 +1261,7 @@ exit_err:
*
* Prepare the PHY ports for an atomic time adjustment by programming the PHY
* Tx and Rx port registers. The actual adjustment is completed by issuing an
- * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
+ * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
*/
static int
ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
@@ -1162,7 +1296,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
*
* Prepare each of the PHY ports for a new increment value by programming the
* port's TIMETUS registers. The new increment value will be updated after
- * issuing an INIT_INCVAL command.
+ * issuing an ICE_PTP_INIT_INCVAL command.
*/
static int
ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
@@ -1248,19 +1382,19 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
tmr_idx = ice_get_ptp_src_clock_index(hw);
cmd_val = tmr_idx << SEL_PHY_SRC;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val |= PHY_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val |= PHY_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val |= PHY_CMD_ADJ_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val |= PHY_CMD_READ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
break;
case ICE_PTP_NOP:
@@ -2196,8 +2330,8 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* @phy_time: on return, the 64bit PHY timer value
* @phc_time: on return, the lower 64bits of PHC time
*
- * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
- * timer values.
+ * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
+ * and PHC timer values.
*/
static int
ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
@@ -2210,15 +2344,15 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
tmr_idx = ice_get_ptp_src_clock_index(hw);
- /* Prepare the PHC timer for a READ_TIME capture command */
- ice_ptp_src_cmd(hw, READ_TIME);
+ /* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
- /* Prepare the PHY timer for a READ_TIME capture command */
- err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
+ /* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
if (err)
return err;
- /* Issue the sync to start the READ_TIME capture */
+ /* Issue the sync to start the ICE_PTP_READ_TIME capture */
ice_ptp_exec_tmr_cmd(hw);
/* Read the captured PHC time from the shadow time registers */
@@ -2252,10 +2386,11 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
* @port: the PHY port to synchronize
*
* Perform an adjustment to ensure that the PHY and PHC timers are in sync.
- * This is done by issuing a READ_TIME command which triggers a simultaneous
- * read of the PHY timer and PHC timer. Then we use the difference to
- * calculate an appropriate 2s complement addition to add to the PHY timer in
- * order to ensure it reads the same value as the primary PHC timer.
+ * This is done by issuing a ICE_PTP_READ_TIME command which triggers a
+ * simultaneous read of the PHY timer and PHC timer. Then we use the
+ * difference to calculate an appropriate 2s complement addition to add
+ * to the PHY timer in order to ensure it reads the same value as the
+ * primary PHC timer.
*/
static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
{
@@ -2285,7 +2420,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
goto err_unlock;
- err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
if (err)
goto err_unlock;
@@ -2408,7 +2543,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
return err;
- err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
if (err)
return err;
@@ -2436,7 +2571,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
return err;
- err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
if (err)
return err;
@@ -2685,28 +2820,39 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
* @lport: the lport to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the timestamp block of the
- * external PHY on the E810 device.
+ * Read the timestamp and then forcibly overwrite its value to clear the valid
+ * bit from the timestamp block of the external PHY on the E810 device.
+ *
+ * This function should only be called on an idx whose bit is set according to
+ * ice_get_phy_tx_tstamp_ready().
*/
static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
{
u32 lo_addr, hi_addr;
+ u64 unused_tstamp;
int err;
+ err = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, err %d\n",
+ lport, idx, err);
+ return err;
+ }
+
lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
err = ice_write_phy_reg_e810(hw, lo_addr, 0);
if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
- err);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, err %d\n",
+ lport, idx, err);
return err;
}
err = ice_write_phy_reg_e810(hw, hi_addr, 0);
if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
- err);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, err %d\n",
+ lport, idx, err);
return err;
}
@@ -2757,7 +2903,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw)
*
* Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the
* initial clock time. The time will not actually be programmed until the
- * driver issues an INIT_TIME command.
+ * driver issues an ICE_PTP_INIT_TIME command.
*
* The time value is the upper 32 bits of the PHY timer, usually in units of
* nominal nanoseconds.
@@ -2792,7 +2938,7 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
*
* Prepare the PHY port for an atomic adjustment by programming the PHY
* ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
- * is completed by issuing an ADJ_TIME sync command.
+ * is completed by issuing an ICE_PTP_ADJ_TIME sync command.
*
* The adjustment value only contains the portion used for the upper 32bits of
* the PHY timer, usually in units of nominal nanoseconds. Negative
@@ -2832,7 +2978,7 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
*
* Prepare the PHY port for a new increment value by programming the PHY
* ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
- * completed by issuing an INIT_INCVAL command.
+ * completed by issuing an ICE_PTP_INIT_INCVAL command.
*/
static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
{
@@ -2875,19 +3021,19 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
int err;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val = GLTSYN_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val = GLTSYN_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val = GLTSYN_CMD_ADJ_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val = GLTSYN_CMD_READ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
break;
case ICE_PTP_NOP:
@@ -3150,6 +3296,21 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
+ * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type
+ * @hw: pointer to the HW structure
+ *
+ * Determine the PHY model for the device, and initialize hw->phy_model
+ * for use by other functions.
+ */
+void ice_ptp_init_phy_model(struct ice_hw *hw)
+{
+ if (ice_is_e810(hw))
+ hw->phy_model = ICE_PHY_E810;
+ else
+ hw->phy_model = ICE_PHY_E822;
+}
+
+/**
* ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
* @hw: pointer to HW struct
* @cmd: the command to issue
@@ -3167,10 +3328,17 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_port_cmd_e810(hw, cmd);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_port_cmd_e822(hw, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
cmd, err);
@@ -3212,14 +3380,21 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err)
return err;
- return ice_ptp_tmr_cmd(hw, INIT_TIME);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME);
}
/**
@@ -3232,8 +3407,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
*
* 1) Write the increment value to the source timer shadow registers
* 2) Write the increment value to the PHY timer shadow registers
- * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
- * source and port timers to the new increment value at the next clock
+ * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both
+ * the source and port timers to the new increment value at the next clock
* cycle.
*/
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
@@ -3247,14 +3422,21 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err)
return err;
- return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL);
}
/**
@@ -3288,8 +3470,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
*
* 1) Write the adjustment to the source timer shadow registers
* 2) Write the adjustment to the PHY timer shadow registers
- * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
- * both the source and port timers at the next clock cycle.
+ * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the
+ * adjustment to both the source and port timers at the next clock cycle.
*/
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
{
@@ -3299,21 +3481,28 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
- * For an ADJ_TIME command, this set of registers represents the value
- * to add to the clock time. It supports subtraction by interpreting
- * the value as a 2's complement integer.
+ * For an ICE_PTP_ADJ_TIME command, this set of registers represents
+ * the value to add to the clock time. It supports subtraction by
+ * interpreting the value as a 2's complement integer.
*/
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err)
return err;
- return ice_ptp_tmr_cmd(hw, ADJ_TIME);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME);
}
/**
@@ -3329,10 +3518,14 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- else
+ case ICE_PHY_E822:
return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -3341,16 +3534,71 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
* @block: the block to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the timestamp block. For
- * E822 devices, the block is the quad to clear from. For E810 devices, the
- * block is the logical port to clear from.
+ * Clear a timestamp from the timestamp block, discarding its value without
+ * returning it. This resets the memory status bit for the timestamp index
+ * allowing it to be reused for another timestamp in the future.
+ *
+ * For E822 devices, the block number is the PHY quad to clear from. For E810
+ * devices, the block number is the logical port to clear from.
+ *
+ * This function must only be called on a timestamp index whose valid bit is
+ * set according to ice_get_phy_tx_tstamp_ready().
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- else
+ case ICE_PHY_E822:
return ice_clear_phy_tstamp_e822(hw, block, idx);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
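+
+/* A sketch of the expected caller pattern, per the note above that idx
+ * validity must come from the ready bitmap:
+ *
+ * u64 ready;
+ * u8 idx;
+ *
+ * if (ice_get_phy_tx_tstamp_ready(hw, block, &ready))
+ * return;
+ * for (idx = 0; idx < 64; idx++)
+ * if (ready & BIT_ULL(idx))
+ * ice_clear_phy_tstamp(hw, block, idx);
+ */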
+
+/**
+ * ice_get_pf_c827_idx - find and return the C827 index for the current pf
+ * @hw: pointer to the hw struct
+ * @idx: index of the found C827 PHY
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
+{
+ struct ice_aqc_get_link_topo cmd;
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
+ u8 ctx;
+
+ if (hw->mac_type != ICE_MAC_E810)
+ return -ENODEV;
+
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP) {
+ *idx = C827_0;
+ return 0;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
+ ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
+ cmd.addr.topo_params.node_type_ctx = ctx;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
+ return -ENOENT;
+
+ if (node_handle == E810C_QSFP_C827_0_HANDLE)
+ *idx = C827_0;
+ else if (node_handle == E810C_QSFP_C827_1_HANDLE)
+ *idx = C827_1;
+ else
+ return -EIO;
+
+ return 0;
}
/**
@@ -3359,10 +3607,14 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E822:
+ ice_ptp_reset_ts_memory_e822(hw);
+ break;
+ case ICE_PHY_E810:
+ default:
return;
-
- ice_ptp_reset_ts_memory_e822(hw);
+ }
}
/**
@@ -3381,10 +3633,14 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_ptp_init_phc_e810(hw);
- else
+ case ICE_PHY_E822:
return ice_ptp_init_phc_e822(hw);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -3400,10 +3656,308 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- else
+ case ICE_PHY_E822:
return ice_get_phy_tx_tstamp_ready_e822(hw, block,
tstamp_ready);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_cgu_get_pin_desc_e823 - get pin description array
+ * @hw: pointer to the hw struct
+ * @input: true for input pins, false for output pins
+ * @size: on return, the number of entries in the returned array
+ *
+ * Return: pointer to the pin description array associated with the given hw.
+ */
+static const struct ice_cgu_pin_desc *
+ice_cgu_get_pin_desc_e823(struct ice_hw *hw, bool input, int *size)
+{
+ static const struct ice_cgu_pin_desc *t;
+
+ if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) {
+ if (input) {
+ t = ice_e823_zl_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e823_zl_cgu_inputs);
+ } else {
+ t = ice_e823_zl_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e823_zl_cgu_outputs);
+ }
+ } else if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) {
+ if (input) {
+ t = ice_e823_si_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e823_si_cgu_inputs);
+ } else {
+ t = ice_e823_si_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e823_si_cgu_outputs);
+ }
+ } else {
+ t = NULL;
+ *size = 0;
+ }
+
+ return t;
+}
+
+/**
+ * ice_cgu_get_pin_desc - get pin description array
+ * @hw: pointer to the hw struct
+ * @input: true for input pins, false for output pins
+ * @size: on return, the size of the array returned by the function
+ *
+ * Return: pointer to the pin description array associated with the given hw.
+ */
+static const struct ice_cgu_pin_desc *
+ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size)
+{
+ const struct ice_cgu_pin_desc *t = NULL;
+
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ if (input) {
+ t = ice_e810t_sfp_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e810t_sfp_cgu_inputs);
+ } else {
+ t = ice_e810t_sfp_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e810t_sfp_cgu_outputs);
+ }
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ if (input) {
+ t = ice_e810t_qsfp_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_inputs);
+ } else {
+ t = ice_e810t_qsfp_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_outputs);
+ }
+ break;
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ t = ice_cgu_get_pin_desc_e823(hw, input, size);
+ break;
+ default:
+ break;
+ }
+
+ return t;
+}
+
+/**
+ * ice_cgu_get_pin_type - get pin's type
+ * @hw: pointer to the hw struct
+ * @pin: pin index
+ * @input: true for input pins, false for output pins
+ *
+ * Return: type of a pin.
+ */
+enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input)
+{
+ const struct ice_cgu_pin_desc *t;
+ int t_size;
+
+ t = ice_cgu_get_pin_desc(hw, input, &t_size);
+
+ if (!t)
+ return 0;
+
+ if (pin >= t_size)
+ return 0;
+
+ return t[pin].type;
+}
+
+/**
+ * ice_cgu_get_pin_freq_supp - get pin's supported frequencies
+ * @hw: pointer to the hw struct
+ * @pin: pin index
+ * @input: true for input pins, false for output pins
+ * @num: on return, the number of supported frequencies
+ *
+ * Get the number of supported frequencies and the array describing them.
+ *
+ * Return: array of supported frequencies for the given pin.
+ */
+struct dpll_pin_frequency *
+ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num)
+{
+ const struct ice_cgu_pin_desc *t;
+ int t_size;
+
+ *num = 0;
+ t = ice_cgu_get_pin_desc(hw, input, &t_size);
+ if (!t)
+ return NULL;
+ if (pin >= t_size)
+ return NULL;
+ *num = t[pin].freq_supp_num;
+
+ return t[pin].freq_supp;
+}
+
+/**
+ * ice_cgu_get_pin_name - get pin's name
+ * @hw: pointer to the hw struct
+ * @pin: pin index
+ * @input: true for input pins, false for output pins
+ *
+ * Return:
+ * * null terminated char array with name
+ * * NULL in case of failure
+ */
+const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input)
+{
+ const struct ice_cgu_pin_desc *t;
+ int t_size;
+
+ t = ice_cgu_get_pin_desc(hw, input, &t_size);
+
+ if (!t)
+ return NULL;
+
+ if (pin >= t_size)
+ return NULL;
+
+ return t[pin].name;
+}
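+
+/* A minimal usage sketch for the three accessors above, assuming an
+ * input pin index i (the real consumer is the ice DPLL support code):
+ *
+ * u8 num_freq;
+ * const char *name = ice_cgu_get_pin_name(hw, i, true);
+ * enum dpll_pin_type type = ice_cgu_get_pin_type(hw, i, true);
+ * struct dpll_pin_frequency *freq =
+ * ice_cgu_get_pin_freq_supp(hw, i, true, &num_freq);
+ */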
+
+/**
+ * ice_get_cgu_state - get the state of the DPLL
+ * @hw: pointer to the hw struct
+ * @dpll_idx: Index of internal DPLL unit
+ * @last_dpll_state: last known state of DPLL
+ * @pin: pointer to a buffer for returning currently active pin
+ * @ref_state: reference clock state
+ * @eec_mode: eec mode of the DPLL
+ * @phase_offset: pointer to a buffer for returning phase offset
+ * @dpll_state: state of the DPLL (output)
+ *
+ * This function reads the state of the DPLL (dpll_idx). The non-NULL
+ * 'pin', 'ref_state', 'eec_mode' and 'phase_offset' parameters are used to
+ * retrieve the currently active pin, state, mode and phase offset, respectively.
+ *
+ * Return: state of the DPLL
+ */
+int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
+ enum dpll_lock_status last_dpll_state, u8 *pin,
+ u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
+ enum dpll_lock_status *dpll_state)
+{
+ u8 hw_ref_state, hw_dpll_state, hw_eec_mode, hw_config;
+ s64 hw_phase_offset;
+ int status;
+
+ status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &hw_ref_state,
+ &hw_dpll_state, &hw_config,
+ &hw_phase_offset, &hw_eec_mode);
+ if (status)
+ return status;
+
+ if (pin)
+ /* current ref pin in dpll_state_refsel_status_X register */
+ *pin = hw_config & ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL;
+ if (phase_offset)
+ *phase_offset = hw_phase_offset;
+ if (ref_state)
+ *ref_state = hw_ref_state;
+ if (eec_mode)
+ *eec_mode = hw_eec_mode;
+ if (!dpll_state)
+ return 0;
+
+ /* According to the ZL DPLL documentation, once the state reaches
+ * LOCKED_HO_ACQ it never returns to FREERUN. This aligns with the
+ * ITU-T G.781 Recommendation. We cannot report HOLDOVER, as the HO
+ * memory is cleared while switching to another reference.
+ * Only when the previous state was either "LOCKED without HO_ACQ"
+ * or "HOLDOVER" do we actually fall back to FREERUN.
+ */
+ if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) {
+ if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY)
+ *dpll_state = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
+ else
+ *dpll_state = DPLL_LOCK_STATUS_LOCKED;
+ } else if (last_dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ ||
+ last_dpll_state == DPLL_LOCK_STATUS_HOLDOVER) {
+ *dpll_state = DPLL_LOCK_STATUS_HOLDOVER;
+ } else {
+ *dpll_state = DPLL_LOCK_STATUS_UNLOCKED;
+ }
+
+ return 0;
+}
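+
+/* A sketch of how a caller implements the holdover semantics described
+ * above: feed the previously reported state back in on each poll
+ * (d->prev_state is a hypothetical field on the caller's side):
+ *
+ * enum dpll_lock_status state;
+ *
+ * if (!ice_get_cgu_state(hw, dpll_idx, d->prev_state, NULL, NULL,
+ * NULL, NULL, &state))
+ * d->prev_state = state;
+ */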
+
+/**
+ * ice_get_cgu_rclk_pin_info - get info on available recovered clock pins
+ * @hw: pointer to the hw struct
+ * @base_idx: returns index of first recovered clock pin on device
+ * @pin_num: returns number of recovered clock pins available on device
+ *
+ * Based on the hw, provide the caller with information about the recovered
+ * clock pins available on the board.
+ *
+ * Return:
+ * * 0 - success, information is valid
+ * * negative - failure, information is not valid
+ */
+int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
+{
+ u8 phy_idx;
+ int ret;
+
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810C_QSFP:
+
+ ret = ice_get_pf_c827_idx(hw, &phy_idx);
+ if (ret)
+ return ret;
+ *base_idx = E810T_CGU_INPUT_C827(phy_idx, ICE_RCLKA_PIN);
+ *pin_num = ICE_E810_RCLK_PINS_NUM;
+ ret = 0;
+ break;
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ *pin_num = ICE_E822_RCLK_PINS_NUM;
+ ret = 0;
+ if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
+ *base_idx = ZL_REF1P;
+ else if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384)
+ *base_idx = SI_REF1P;
+ else
+ ret = -ENODEV;
+
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+
+ return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 9aa10b0426fd..18a993134826 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -3,13 +3,14 @@
#ifndef _ICE_PTP_HW_H_
#define _ICE_PTP_HW_H_
+#include <linux/dpll.h>
enum ice_ptp_tmr_cmd {
- INIT_TIME,
- INIT_INCVAL,
- ADJ_TIME,
- ADJ_TIME_AT_TIME,
- READ_TIME,
+ ICE_PTP_INIT_TIME,
+ ICE_PTP_INIT_INCVAL,
+ ICE_PTP_ADJ_TIME,
+ ICE_PTP_ADJ_TIME_AT_TIME,
+ ICE_PTP_READ_TIME,
ICE_PTP_NOP,
};
@@ -110,6 +111,77 @@ struct ice_cgu_pll_params_e822 {
u32 post_pll_div;
};
+#define E810C_QSFP_C827_0_HANDLE 2
+#define E810C_QSFP_C827_1_HANDLE 3
+enum ice_e810_c827_idx {
+ C827_0,
+ C827_1
+};
+
+enum ice_phy_rclk_pins {
+ ICE_RCLKA_PIN = 0, /* SCL pin */
+ ICE_RCLKB_PIN, /* SDA pin */
+};
+
+#define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1)
+#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
+#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \
+ (_pin) + ZL_REF1P)
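+
+/* Worked example of the macro above: PHY C827_1 with ICE_RCLKA_PIN gives
+ * E810T_CGU_INPUT_C827(1, 0) = 1 * 2 + 0 + ZL_REF1P = ZL_REF2P, while
+ * C827_0 with ICE_RCLKB_PIN yields ZL_REF1N.
+ */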
+
+enum ice_zl_cgu_in_pins {
+ ZL_REF0P = 0,
+ ZL_REF0N,
+ ZL_REF1P,
+ ZL_REF1N,
+ ZL_REF2P,
+ ZL_REF2N,
+ ZL_REF3P,
+ ZL_REF3N,
+ ZL_REF4P,
+ ZL_REF4N,
+ NUM_ZL_CGU_INPUT_PINS
+};
+
+enum ice_zl_cgu_out_pins {
+ ZL_OUT0 = 0,
+ ZL_OUT1,
+ ZL_OUT2,
+ ZL_OUT3,
+ ZL_OUT4,
+ ZL_OUT5,
+ ZL_OUT6,
+ NUM_ZL_CGU_OUTPUT_PINS
+};
+
+enum ice_si_cgu_in_pins {
+ SI_REF0P = 0,
+ SI_REF0N,
+ SI_REF1P,
+ SI_REF1N,
+ SI_REF2P,
+ SI_REF2N,
+ SI_REF3,
+ SI_REF4,
+ NUM_SI_CGU_INPUT_PINS
+};
+
+enum ice_si_cgu_out_pins {
+ SI_OUT0 = 0,
+ SI_OUT1,
+ SI_OUT2,
+ SI_OUT3,
+ SI_OUT4,
+ NUM_SI_CGU_OUTPUT_PINS
+};
+
+struct ice_cgu_pin_desc {
+ char *name;
+ u8 index;
+ enum dpll_pin_type type;
+ u32 freq_supp_num;
+ struct dpll_pin_frequency *freq_supp;
+};
+
extern const struct
ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
@@ -131,6 +203,7 @@ extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
bool ice_ptp_lock(struct ice_hw *hw);
void ice_ptp_unlock(struct ice_hw *hw);
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd);
int ice_ptp_init_time(struct ice_hw *hw, u64 time);
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
@@ -197,6 +270,19 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw);
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
+bool ice_is_pca9575_present(struct ice_hw *hw);
+int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx);
+enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
+struct dpll_pin_frequency *
+ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num);
+const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input);
+int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
+ enum dpll_lock_status last_dpll_state, u8 *pin,
+ u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
+ enum dpll_lock_status *dpll_state);
+int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
+
+void ice_ptp_init_phy_model(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index c0533d7b66b9..2f4a621254e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -229,29 +229,22 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
* ice_sched_remove_elems - remove nodes from HW
* @hw: pointer to the HW struct
* @parent: pointer to the parent node
- * @num_nodes: number of nodes
- * @node_teids: array of node teids to be deleted
+ * @node_teid: node teid to be deleted
*
* This function removes a node from HW
*/
static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
- u16 num_nodes, u32 *node_teids)
+ u32 node_teid)
{
- struct ice_aqc_delete_elem *buf;
- u16 i, num_groups_removed = 0;
- u16 buf_size;
+ DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
+ u16 buf_size = __struct_size(buf);
+ u16 num_groups_removed = 0;
int status;
- buf_size = struct_size(buf, teid, num_nodes);
- buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->hdr.parent_teid = parent->info.node_teid;
- buf->hdr.num_elems = cpu_to_le16(num_nodes);
- for (i = 0; i < num_nodes; i++)
- buf->teid[i] = cpu_to_le32(node_teids[i]);
+ buf->hdr.num_elems = cpu_to_le16(1);
+ buf->teid[0] = cpu_to_le32(node_teid);
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
@@ -259,7 +252,6 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
hw->adminq.sq_last_status);
- devm_kfree(ice_hw_to_dev(hw), buf);
return status;
}
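+
+/* DEFINE_FLEX() (from <linux/overflow.h>) replaces the heap allocation:
+ * it declares a zero-initialized on-stack instance of a flex-array
+ * struct with storage for the requested number of trailing elements,
+ * and __struct_size() reports the total size. The same pattern,
+ * standalone:
+ *
+ * DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
+ * u16 buf_size = __struct_size(buf);
+ */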
@@ -326,7 +318,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- ice_sched_remove_elems(hw, node->parent, 1, &teid);
+ ice_sched_remove_elems(hw, node->parent, teid);
}
parent = node->parent;
/* root has no parent */
@@ -437,24 +429,20 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
}
/**
- * ice_aq_move_sched_elems - move scheduler elements
+ * ice_aq_move_sched_elems - move a scheduler element (one group at a time)
* @hw: pointer to the HW struct
- * @grps_req: number of groups to move
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @grps_movd: returns total number of groups moved
- * @cd: pointer to command details structure or NULL
*
* Move scheduling elements (0x0408)
*/
int
-ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
- struct ice_aqc_move_elem *buf, u16 buf_size,
- u16 *grps_movd, struct ice_sq_cd *cd)
+ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
+ u16 buf_size, u16 *grps_movd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
- grps_req, (void *)buf, buf_size,
- grps_movd, cd);
+ 1, buf, buf_size, grps_movd, NULL);
}
/**
@@ -1193,7 +1181,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
int status;
/* remove the default leaf node */
- status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
+ status = ice_sched_remove_elems(pi->hw, node->parent, teid);
if (!status)
ice_free_sched_node(pi, node);
}
@@ -2232,12 +2220,12 @@ int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
- struct ice_aqc_move_elem *buf;
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ u16 buf_len = __struct_size(buf);
struct ice_sched_node *node;
u16 i, grps_movd = 0;
struct ice_hw *hw;
int status = 0;
- u16 buf_len;
hw = pi->hw;
@@ -2249,35 +2237,27 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
hw->max_children[parent->tx_sched_layer])
return -ENOSPC;
- buf_len = struct_size(buf, teid, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
for (i = 0; i < num_items; i++) {
node = ice_sched_find_node_by_teid(pi->root, list[i]);
if (!node) {
status = -EINVAL;
- goto move_err_exit;
+ break;
}
buf->hdr.src_parent_teid = node->info.parent_teid;
buf->hdr.dest_parent_teid = parent->info.node_teid;
buf->teid[0] = node->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
- status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
- &grps_movd, NULL);
+ status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd);
if (status && grps_movd != 1) {
status = -EIO;
- goto move_err_exit;
+ break;
}
/* update the SW DB */
ice_sched_update_parent(parent, node);
}
-move_err_exit:
- kfree(buf);
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 0055d9330c07..1aef05ea5a57 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -161,10 +161,8 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
u16 *num_nodes_added);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
-int
-ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
- struct ice_aqc_move_elem *buf, u16 buf_size,
- u16 *grps_movd, struct ice_sq_cd *cd);
+int ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
+ u16 buf_size, u16 *grps_movd);
int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 2f77b684ff76..ee19f3aa3d19 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1812,15 +1812,11 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
- struct ice_aqc_alloc_free_res_elem *sw_buf;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
struct ice_aqc_res_elem *vsi_ele;
- u16 buf_len;
int status;
- buf_len = struct_size(sw_buf, elem, 1);
- sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
- if (!sw_buf)
- return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
if (lkup_type == ICE_SW_LKUP_MAC ||
@@ -1840,8 +1836,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else {
- status = -EINVAL;
- goto ice_aq_alloc_free_vsi_list_exit;
+ return -EINVAL;
}
if (opc == ice_aqc_opc_free_res)
@@ -1849,16 +1844,14 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc);
if (status)
- goto ice_aq_alloc_free_vsi_list_exit;
+ return status;
if (opc == ice_aqc_opc_alloc_res) {
vsi_ele = &sw_buf->elem[0];
*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
}
-ice_aq_alloc_free_vsi_list_exit:
- devm_kfree(ice_hw_to_dev(hw), sw_buf);
- return status;
+ return 0;
}
/**
@@ -2088,15 +2081,10 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
*/
int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
- struct ice_aqc_alloc_free_res_elem *sw_buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
int status;
- buf_len = struct_size(sw_buf, elem, 1);
- sw_buf = kzalloc(buf_len, GFP_KERNEL);
- if (!sw_buf)
- return -ENOMEM;
-
sw_buf->num_elems = cpu_to_le16(1);
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
ICE_AQC_RES_TYPE_S) |
@@ -2105,7 +2093,6 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
ice_aqc_opc_alloc_res);
if (!status)
*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
- kfree(sw_buf);
return status;
}
@@ -4434,28 +4421,19 @@ int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
- struct ice_aqc_alloc_free_res_elem *buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ u16 buf_len = __struct_size(buf);
int status;
- /* Allocate resource */
- buf_len = struct_size(buf, elem, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) | alloc_shared);
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
if (status)
- goto exit;
+ return status;
*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
-
-exit:
- kfree(buf);
return status;
}
@@ -4471,16 +4449,10 @@ int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
- struct ice_aqc_alloc_free_res_elem *buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ u16 buf_len = __struct_size(buf);
int status;
- /* Free resource */
- buf_len = struct_size(buf, elem, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) | alloc_shared);
@@ -4490,7 +4462,6 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
if (status)
ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
- kfree(buf);
return status;
}
@@ -4508,15 +4479,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
*/
int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
- struct ice_aqc_alloc_free_res_elem *buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ u16 buf_len = __struct_size(buf);
int status;
- buf_len = struct_size(buf, elem, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->num_elems = cpu_to_le16(1);
if (shared)
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -4534,7 +4500,6 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
type, res_id, shared ? "SHARED" : "DEDICATED");
- kfree(buf);
return status;
}
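The hunks above all apply the same conversion: a heap-allocated single-element flexible-array struct becomes an on-stack one via the DEFINE_FLEX()/__struct_size() helpers from <linux/overflow.h>, removing the kzalloc()/kfree() pairs and their unwind labels. A minimal sketch of the pattern (illustrative only, not part of this patch; the exact macro expansion may differ):

	/* DEFINE_FLEX(T, name, member, count) declares an on-stack, zeroed
	 * instance of T with storage for 'count' trailing flexible-array
	 * elements; __struct_size() reports its total compile-time size.
	 */
	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
	u16 buf_len = __struct_size(sw_buf);	/* was struct_size() + kzalloc() */

	sw_buf->num_elems = cpu_to_le16(1);
	/* use sw_buf; no kfree()/devm_kfree() needed on any exit path */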
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index c8322fb6f2b3..7e06373e14d9 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -450,7 +450,7 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
if (xdp_res & ICE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & ICE_XDP_TX) {
if (static_branch_unlikely(&ice_xdp_locking_key))
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 5e353b0cbe6f..bb5d8b681bc2 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -822,6 +822,13 @@ struct ice_mbx_data {
u16 async_watermark_val;
};
+/* PHY model */
+enum ice_phy_model {
+ ICE_PHY_UNSUP = -1,
+ ICE_PHY_E810 = 1,
+ ICE_PHY_E822,
+};
+
/* Port hardware description */
struct ice_hw {
u8 __iomem *hw_addr;
@@ -843,6 +850,7 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
+ enum ice_phy_model phy_model;
u16 max_burst_size; /* driver sets this value */
@@ -901,13 +909,13 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_PHY_PER_NAC 1
-#define ICE_MAX_QUAD 2
-#define ICE_NUM_QUAD_TYPE 2
-#define ICE_PORTS_PER_QUAD 4
-#define ICE_PHY_0_LAST_QUAD 1
-#define ICE_PORTS_PER_PHY 8
-#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
+#define ICE_PHY_PER_NAC_E822 1
+#define ICE_MAX_QUAD 2
+#define ICE_QUADS_PER_PHY_E822 2
+#define ICE_PORTS_PER_PHY_E822 8
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_PORTS_PER_PHY_E810 4
+#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
@@ -965,6 +973,7 @@ struct ice_hw {
DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX);
u8 dvm_ena;
u16 io_expander_handle;
+ u8 cgu_part_number;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 48fea6fa0362..31a082e8a827 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -123,6 +123,9 @@ struct ice_vf {
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
u16 num_vf_qs; /* num of queue configured per VF */
+ u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */
+#define ICE_INNER_VLAN_STRIP_ENA BIT(0)
+#define ICE_OUTER_VLAN_STRIP_ENA BIT(1)
struct ice_mdd_vf_events mdd_rx_events;
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index db97353efd06..01e88b6e43a1 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -486,6 +486,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_CRC;
+
if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
@@ -1621,6 +1624,15 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
for (i = 0; i < qci->num_queue_pairs; i++) {
+ if (!qci->qpair[i].rxq.crc_disable)
+ continue;
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
+ vf->vlan_strip_ena)
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
@@ -1666,6 +1678,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+ if (qpi->rxq.crc_disable)
+ vsi->rx_rings[q_idx]->flags |=
+ ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ vsi->rx_rings[q_idx]->flags &=
+ ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+
if (qpi->rxq.databuffer_size != 0 &&
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
qpi->rxq.databuffer_size < 1024))
@@ -2411,6 +2430,21 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
}
/**
+ * ice_vsi_is_rxq_crc_strip_dis - check if CRC strip is disabled on any Rx queue
+ * @vsi: pointer to the VF VSI info
+ */
+static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi)
+{
+ unsigned int i;
+
+ ice_for_each_alloc_rxq(vsi, i)
+ if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS)
+ return true;
+
+ return false;
+}
+
+/**
* ice_vc_ena_vlan_stripping
* @vf: pointer to the VF info
*
@@ -2439,6 +2473,8 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ else
+ vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
@@ -2474,6 +2510,8 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
if (vsi->inner_vlan_ops.dis_stripping(vsi))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ else
+ vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
@@ -2651,6 +2689,8 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ vf->vlan_strip_ena = 0;
+
if (!vsi)
return -EINVAL;
@@ -2660,10 +2700,16 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
return 0;
- if (ice_vf_vlan_offload_ena(vf->driver_caps))
- return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
- else
- return vsi->inner_vlan_ops.dis_stripping(vsi);
+ if (ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ int err;
+
+ err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
+ if (!err)
+ vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
+ return err;
+ }
+
+ return vsi->inner_vlan_ops.dis_stripping(vsi);
}
static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
@@ -3437,6 +3483,11 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
goto out;
}
+ if (ice_vsi_is_rxq_crc_strip_dis(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto out;
+ }
+
ethertype_setting = strip_msg->outer_ethertype_setting;
if (ethertype_setting) {
if (ice_vc_ena_vlan_offload(vsi,
@@ -3457,6 +3508,8 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
* enabled, is extracted in L2TAG1.
*/
ice_vsi_update_l2tsel(vsi, l2tsel);
+
+ vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA;
}
}
@@ -3468,6 +3521,9 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
goto out;
}
+ if (ethertype_setting)
+ vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
+
out:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
v_ret, NULL, 0);
@@ -3529,6 +3585,8 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
* in L2TAG1.
*/
ice_vsi_update_l2tsel(vsi, l2tsel);
+
+ vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA;
}
}
@@ -3538,6 +3596,9 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
goto out;
}
+ if (ethertype_setting)
+ vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
+
out:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
v_ret, NULL, 0);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 2a3f0834e139..99954508184f 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -217,21 +217,16 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
*/
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
- struct ice_aqc_add_tx_qgrp *qg_buf;
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ u16 size = __struct_size(qg_buf);
struct ice_q_vector *q_vector;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- u16 size;
int err;
if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
return -EINVAL;
- size = struct_size(qg_buf, txqs, 1);
- qg_buf = kzalloc(size, GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
-
qg_buf->num_txqs = 1;
tx_ring = vsi->tx_rings[q_idx];
@@ -240,7 +235,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
if (err)
- goto free_buf;
+ return err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
@@ -249,29 +244,28 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
qg_buf->num_txqs = 1;
err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
if (err)
- goto free_buf;
+ return err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
if (err)
- goto free_buf;
+ return err;
ice_qvec_cfg_msix(vsi, q_vector);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
- goto free_buf;
+ return err;
clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-free_buf:
- kfree(qg_buf);
- return err;
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
new file mode 100644
index 000000000000..6844ead2f3ac
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2023 Intel Corporation
+
+# Makefile for Intel(R) Infrastructure Data Path Function Linux Driver
+
+obj-$(CONFIG_IDPF) += idpf.o
+
+idpf-y := \
+ idpf_controlq.o \
+ idpf_controlq_setup.o \
+ idpf_dev.o \
+ idpf_ethtool.o \
+ idpf_lib.o \
+ idpf_main.o \
+ idpf_singleq_txrx.o \
+ idpf_txrx.o \
+ idpf_virtchnl.o \
+ idpf_vf_dev.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
new file mode 100644
index 000000000000..bee73353b56a
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -0,0 +1,968 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_H_
+#define _IDPF_H_
+
+/* Forward declaration */
+struct idpf_adapter;
+struct idpf_vport;
+struct idpf_vport_max_q;
+
+#include <net/pkt_sched.h>
+#include <linux/aer.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/bitfield.h>
+#include <linux/sctp.h>
+#include <linux/ethtool.h>
+#include <net/gro.h>
+#include <linux/dim.h>
+
+#include "virtchnl2.h"
+#include "idpf_lan_txrx.h"
+#include "idpf_txrx.h"
+#include "idpf_controlq.h"
+
+#define GETMAXVAL(num_bits) GENMASK((num_bits) - 1, 0)
+
+#define IDPF_NO_FREE_SLOT 0xffff
+
+/* Default Mailbox settings */
+#define IDPF_NUM_FILTERS_PER_MSG 20
+#define IDPF_NUM_DFLT_MBX_Q 2 /* includes both TX and RX */
+#define IDPF_DFLT_MBX_Q_LEN 64
+#define IDPF_DFLT_MBX_ID -1
+/* maximum number of times to try before resetting mailbox */
+#define IDPF_MB_MAX_ERR 20
+#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
+ ((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
+#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
+#define IDPF_WAIT_FOR_EVENT_TIMEO 60000
+
+#define IDPF_MAX_WAIT 500
+
+/* available message levels */
+#define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+#define IDPF_DIM_PROFILE_SLOTS 5
+
+#define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2
+#define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0
+
+/**
+ * struct idpf_mac_filter
+ * @list: list member field
+ * @macaddr: MAC address
+ * @remove: filter should be removed (virtchnl)
+ * @add: filter should be added (virtchnl)
+ */
+struct idpf_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+ bool remove;
+ bool add;
+};
+
+/**
+ * enum idpf_state - State machine to handle bring up
+ * @__IDPF_STARTUP: Start the state machine
+ * @__IDPF_VER_CHECK: Negotiate virtchnl version
+ * @__IDPF_GET_CAPS: Negotiate capabilities
+ * @__IDPF_INIT_SW: Init based on given capabilities
+ * @__IDPF_STATE_LAST: Must be last, used to determine size
+ */
+enum idpf_state {
+ __IDPF_STARTUP,
+ __IDPF_VER_CHECK,
+ __IDPF_GET_CAPS,
+ __IDPF_INIT_SW,
+ __IDPF_STATE_LAST,
+};
+
+/**
+ * enum idpf_flags - Hard reset causes.
+ * @IDPF_HR_FUNC_RESET: Hard reset when TxRx timeout
+ * @IDPF_HR_DRV_LOAD: Set on driver load for a clean HW
+ * @IDPF_HR_RESET_IN_PROG: Reset in progress
+ * @IDPF_REMOVE_IN_PROG: Driver remove in progress
+ * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
+ * @IDPF_FLAGS_NBITS: Must be last
+ */
+enum idpf_flags {
+ IDPF_HR_FUNC_RESET,
+ IDPF_HR_DRV_LOAD,
+ IDPF_HR_RESET_IN_PROG,
+ IDPF_REMOVE_IN_PROG,
+ IDPF_MB_INTR_MODE,
+ IDPF_FLAGS_NBITS,
+};
+
+/**
+ * enum idpf_cap_field - Offsets into capabilities struct for specific caps
+ * @IDPF_BASE_CAPS: generic base capabilities
+ * @IDPF_CSUM_CAPS: checksum offload capabilities
+ * @IDPF_SEG_CAPS: segmentation offload capabilities
+ * @IDPF_RSS_CAPS: RSS offload capabilities
+ * @IDPF_HSPLIT_CAPS: Header split capabilities
+ * @IDPF_RSC_CAPS: RSC offload capabilities
+ * @IDPF_OTHER_CAPS: miscellaneous offloads
+ *
+ * Used when checking for a specific capability flag. Since different
+ * capability sets are not mutually exclusive numerically, the caller must
+ * specify which type of capability they are checking for.
+ */
+enum idpf_cap_field {
+ IDPF_BASE_CAPS = -1,
+ IDPF_CSUM_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ csum_caps),
+ IDPF_SEG_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ seg_caps),
+ IDPF_RSS_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ rss_caps),
+ IDPF_HSPLIT_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ hsplit_caps),
+ IDPF_RSC_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ rsc_caps),
+ IDPF_OTHER_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ other_caps),
+};
+
+/**
+ * enum idpf_vport_state - Current vport state
+ * @__IDPF_VPORT_DOWN: Vport is down
+ * @__IDPF_VPORT_UP: Vport is up
+ * @__IDPF_VPORT_STATE_LAST: Must be last, number of states
+ */
+enum idpf_vport_state {
+ __IDPF_VPORT_DOWN,
+ __IDPF_VPORT_UP,
+ __IDPF_VPORT_STATE_LAST,
+};
+
+/**
+ * struct idpf_netdev_priv - Struct to store vport back pointer
+ * @adapter: Adapter back pointer
+ * @vport: Vport back pointer
+ * @vport_id: Vport identifier
+ * @vport_idx: Relative vport index
+ * @state: See enum idpf_vport_state
+ * @netstats: Packet and byte stats
+ * @stats_lock: Lock to protect stats update
+ */
+struct idpf_netdev_priv {
+ struct idpf_adapter *adapter;
+ struct idpf_vport *vport;
+ u32 vport_id;
+ u16 vport_idx;
+ enum idpf_vport_state state;
+ struct rtnl_link_stats64 netstats;
+ spinlock_t stats_lock;
+};
+
+/**
+ * struct idpf_reset_reg - Reset register offsets/masks
+ * @rstat: Reset status register
+ * @rstat_m: Reset status mask
+ */
+struct idpf_reset_reg {
+ void __iomem *rstat;
+ u32 rstat_m;
+};
+
+/**
+ * struct idpf_vport_max_q - Queue limits
+ * @max_rxq: Maximum number of RX queues supported
+ * @max_txq: Maixmum number of TX queues supported
+ * @max_bufq: In splitq, maximum number of buffer queues supported
+ * @max_complq: In splitq, maximum number of completion queues supported
+ */
+struct idpf_vport_max_q {
+ u16 max_rxq;
+ u16 max_txq;
+ u16 max_bufq;
+ u16 max_complq;
+};
+
+/**
+ * struct idpf_reg_ops - Device specific register operation function pointers
+ * @ctlq_reg_init: Mailbox control queue register initialization
+ * @intr_reg_init: Traffic interrupt register initialization
+ * @mb_intr_reg_init: Mailbox interrupt register initialization
+ * @reset_reg_init: Reset register initialization
+ * @trigger_reset: Trigger a reset to occur
+ */
+struct idpf_reg_ops {
+ void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
+ int (*intr_reg_init)(struct idpf_vport *vport);
+ void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
+ void (*reset_reg_init)(struct idpf_adapter *adapter);
+ void (*trigger_reset)(struct idpf_adapter *adapter,
+ enum idpf_flags trig_cause);
+};
+
+/**
+ * struct idpf_dev_ops - Device specific operations
+ * @reg_ops: Register operations
+ */
+struct idpf_dev_ops {
+ struct idpf_reg_ops reg_ops;
+};
+
+/* These macros allow us to generate an enum and a matching char * array of
+ * stringified enums that are always in sync. Checkpatch issues a bogus warning
+ * about this being a complex macro, but that warning is wrong: these are
+ * never used as a statement and are only used to define the enum and array.
+ */
+#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \
+ STATE(IDPF_VC_CREATE_VPORT) \
+ STATE(IDPF_VC_CREATE_VPORT_ERR) \
+ STATE(IDPF_VC_ENA_VPORT) \
+ STATE(IDPF_VC_ENA_VPORT_ERR) \
+ STATE(IDPF_VC_DIS_VPORT) \
+ STATE(IDPF_VC_DIS_VPORT_ERR) \
+ STATE(IDPF_VC_DESTROY_VPORT) \
+ STATE(IDPF_VC_DESTROY_VPORT_ERR) \
+ STATE(IDPF_VC_CONFIG_TXQ) \
+ STATE(IDPF_VC_CONFIG_TXQ_ERR) \
+ STATE(IDPF_VC_CONFIG_RXQ) \
+ STATE(IDPF_VC_CONFIG_RXQ_ERR) \
+ STATE(IDPF_VC_ENA_QUEUES) \
+ STATE(IDPF_VC_ENA_QUEUES_ERR) \
+ STATE(IDPF_VC_DIS_QUEUES) \
+ STATE(IDPF_VC_DIS_QUEUES_ERR) \
+ STATE(IDPF_VC_MAP_IRQ) \
+ STATE(IDPF_VC_MAP_IRQ_ERR) \
+ STATE(IDPF_VC_UNMAP_IRQ) \
+ STATE(IDPF_VC_UNMAP_IRQ_ERR) \
+ STATE(IDPF_VC_ADD_QUEUES) \
+ STATE(IDPF_VC_ADD_QUEUES_ERR) \
+ STATE(IDPF_VC_DEL_QUEUES) \
+ STATE(IDPF_VC_DEL_QUEUES_ERR) \
+ STATE(IDPF_VC_ALLOC_VECTORS) \
+ STATE(IDPF_VC_ALLOC_VECTORS_ERR) \
+ STATE(IDPF_VC_DEALLOC_VECTORS) \
+ STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \
+ STATE(IDPF_VC_SET_SRIOV_VFS) \
+ STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \
+ STATE(IDPF_VC_GET_RSS_LUT) \
+ STATE(IDPF_VC_GET_RSS_LUT_ERR) \
+ STATE(IDPF_VC_SET_RSS_LUT) \
+ STATE(IDPF_VC_SET_RSS_LUT_ERR) \
+ STATE(IDPF_VC_GET_RSS_KEY) \
+ STATE(IDPF_VC_GET_RSS_KEY_ERR) \
+ STATE(IDPF_VC_SET_RSS_KEY) \
+ STATE(IDPF_VC_SET_RSS_KEY_ERR) \
+ STATE(IDPF_VC_GET_STATS) \
+ STATE(IDPF_VC_GET_STATS_ERR) \
+ STATE(IDPF_VC_ADD_MAC_ADDR) \
+ STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \
+ STATE(IDPF_VC_DEL_MAC_ADDR) \
+ STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \
+ STATE(IDPF_VC_GET_PTYPE_INFO) \
+ STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \
+ STATE(IDPF_VC_LOOPBACK_STATE) \
+ STATE(IDPF_VC_LOOPBACK_STATE_ERR) \
+ STATE(IDPF_VC_NBITS)
+
+#define IDPF_GEN_ENUM(ENUM) ENUM,
+#define IDPF_GEN_STRING(STRING) #STRING,
+
+enum idpf_vport_vc_state {
+ IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
+};
+
+extern const char * const idpf_vport_vc_state_str[];
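To illustrate the X-macro pattern, a sketch of how the string table stays in lockstep with the enum (the driver defines the real array elsewhere; this mirrors that shape, and is not part of the patch text here):

	/* Expanding the same STATE() list twice keeps the enum and the
	 * string array in sync; adding a state updates both automatically.
	 */
	const char * const idpf_vport_vc_state_str[] = {
		IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
	};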
+
+/**
+ * enum idpf_vport_reset_cause - Vport soft reset causes
+ * @IDPF_SR_Q_CHANGE: Soft reset queue change
+ * @IDPF_SR_Q_DESC_CHANGE: Soft reset descriptor change
+ * @IDPF_SR_MTU_CHANGE: Soft reset MTU change
+ * @IDPF_SR_RSC_CHANGE: Soft reset RSC change
+ */
+enum idpf_vport_reset_cause {
+ IDPF_SR_Q_CHANGE,
+ IDPF_SR_Q_DESC_CHANGE,
+ IDPF_SR_MTU_CHANGE,
+ IDPF_SR_RSC_CHANGE,
+};
+
+/**
+ * enum idpf_vport_flags - Vport flags
+ * @IDPF_VPORT_DEL_QUEUES: To send delete queues message
+ * @IDPF_VPORT_SW_MARKER: Indicate TX pipe drain software marker packets
+ * processing is done
+ * @IDPF_VPORT_FLAGS_NBITS: Must be last
+ */
+enum idpf_vport_flags {
+ IDPF_VPORT_DEL_QUEUES,
+ IDPF_VPORT_SW_MARKER,
+ IDPF_VPORT_FLAGS_NBITS,
+};
+
+struct idpf_port_stats {
+ struct u64_stats_sync stats_sync;
+ u64_stats_t rx_hw_csum_err;
+ u64_stats_t rx_hsplit;
+ u64_stats_t rx_hsplit_hbo;
+ u64_stats_t rx_bad_descs;
+ u64_stats_t tx_linearize;
+ u64_stats_t tx_busy;
+ u64_stats_t tx_drops;
+ u64_stats_t tx_dma_map_errs;
+ struct virtchnl2_vport_stats vport_stats;
+};
+
+/**
+ * struct idpf_vport - Handle for netdevices and queue resources
+ * @num_txq: Number of allocated TX queues
+ * @num_complq: Number of allocated completion queues
+ * @txq_desc_count: TX queue descriptor count
+ * @complq_desc_count: Completion queue descriptor count
+ * @compln_clean_budget: Work budget for completion clean
+ * @num_txq_grp: Number of TX queue groups
+ * @txq_grps: Array of TX queue groups
+ * @txq_model: Split queue or single queue queuing model
+ * @txqs: Used only in hotpath to get to the right queue very fast
+ * @crc_enable: Enable CRC insertion offload
+ * @num_rxq: Number of allocated RX queues
+ * @num_bufq: Number of allocated buffer queues
+ * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
+ * to complete all buffer descriptors for all buffer queues in
+ * the worst case.
+ * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
+ * @bufq_desc_count: Buffer queue descriptor count
+ * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc)
+ * @num_rxq_grp: Number of RX queue groups
+ * @rxq_grps: Array of RX queue groups. The number of groups times the number
+ * of RX queues per group gives the total number of RX queues.
+ * @rxq_model: Splitq queue or single queue queuing model
+ * @rx_ptype_lkup: Lookup table for ptypes on RX
+ * @adapter: back pointer to associated adapter
+ * @netdev: Associated net_device. Each vport should have one and only one
+ * associated netdev.
+ * @flags: See enum idpf_vport_flags
+ * @vport_type: Default SRIOV, SIOV, etc.
+ * @vport_id: Device given vport identifier
+ * @idx: Software index in adapter vports struct
+ * @default_vport: Use this vport if one isn't specified
+ * @base_rxd: True if the driver should use base descriptors instead of flex
+ * @num_q_vectors: Number of IRQ vectors allocated
+ * @q_vectors: Array of queue vectors
+ * @q_vector_idxs: Starting index of queue vectors
+ * @max_mtu: device given max possible MTU
+ * @default_mac_addr: device will give a default MAC to use
+ * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
+ * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
+ * @port_stats: per port csum, header split, and other offload stats
+ * @link_up: True if link is up
+ * @link_speed_mbps: Link speed in mbps
+ * @vc_msg: Virtchnl message buffer
+ * @vc_state: Virtchnl message state
+ * @vchnl_wq: Wait queue for virtchnl messages
+ * @sw_marker_wq: workqueue for marker packets
+ * @vc_buf_lock: Lock to protect virtchnl buffer
+ */
+struct idpf_vport {
+ u16 num_txq;
+ u16 num_complq;
+ u32 txq_desc_count;
+ u32 complq_desc_count;
+ u32 compln_clean_budget;
+ u16 num_txq_grp;
+ struct idpf_txq_group *txq_grps;
+ u32 txq_model;
+ struct idpf_queue **txqs;
+ bool crc_enable;
+
+ u16 num_rxq;
+ u16 num_bufq;
+ u32 rxq_desc_count;
+ u8 num_bufqs_per_qgrp;
+ u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+ u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+ u16 num_rxq_grp;
+ struct idpf_rxq_group *rxq_grps;
+ u32 rxq_model;
+ struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE];
+
+ struct idpf_adapter *adapter;
+ struct net_device *netdev;
+ DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
+ u16 vport_type;
+ u32 vport_id;
+ u16 idx;
+ bool default_vport;
+ bool base_rxd;
+
+ u16 num_q_vectors;
+ struct idpf_q_vector *q_vectors;
+ u16 *q_vector_idxs;
+ u16 max_mtu;
+ u8 default_mac_addr[ETH_ALEN];
+ u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
+ u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
+ struct idpf_port_stats port_stats;
+
+ bool link_up;
+ u32 link_speed_mbps;
+
+ char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
+ DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
+
+ wait_queue_head_t vchnl_wq;
+ wait_queue_head_t sw_marker_wq;
+ struct mutex vc_buf_lock;
+};
+
+/**
+ * enum idpf_user_flags
+ * @__IDPF_PROMISC_UC: Unicast promiscuous mode
+ * @__IDPF_PROMISC_MC: Multicast promiscuous mode
+ * @__IDPF_USER_FLAGS_NBITS: Must be last
+ */
+enum idpf_user_flags {
+ __IDPF_PROMISC_UC = 32,
+ __IDPF_PROMISC_MC,
+
+ __IDPF_USER_FLAGS_NBITS,
+};
+
+/**
+ * struct idpf_rss_data - Associated RSS data
+ * @rss_key_size: Size of RSS hash key
+ * @rss_key: RSS hash key
+ * @rss_lut_size: Size of RSS lookup table
+ * @rss_lut: RSS lookup table
+ * @cached_lut: Used to restore previously init RSS lut
+ */
+struct idpf_rss_data {
+ u16 rss_key_size;
+ u8 *rss_key;
+ u16 rss_lut_size;
+ u32 *rss_lut;
+ u32 *cached_lut;
+};
+
+/**
+ * struct idpf_vport_user_config_data - User defined configuration values for
+ * each vport.
+ * @rss_data: See struct idpf_rss_data
+ * @num_req_tx_qs: Number of user requested TX queues through ethtool
+ * @num_req_rx_qs: Number of user requested RX queues through ethtool
+ * @num_req_txq_desc: Number of user requested TX queue descriptors through
+ * ethtool
+ * @num_req_rxq_desc: Number of user requested RX queue descriptors through
+ * ethtool
+ * @user_flags: User toggled config flags
+ * @mac_filter_list: List of MAC filters
+ *
+ * Used to restore configuration after a reset as the vport will get wiped.
+ */
+struct idpf_vport_user_config_data {
+ struct idpf_rss_data rss_data;
+ u16 num_req_tx_qs;
+ u16 num_req_rx_qs;
+ u32 num_req_txq_desc;
+ u32 num_req_rxq_desc;
+ DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
+ struct list_head mac_filter_list;
+};
+
+/**
+ * enum idpf_vport_config_flags - Vport config flags
+ * @IDPF_VPORT_REG_NETDEV: Register netdev
+ * @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
+ * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight
+ * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight
+ * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
+ */
+enum idpf_vport_config_flags {
+ IDPF_VPORT_REG_NETDEV,
+ IDPF_VPORT_UP_REQUESTED,
+ IDPF_VPORT_ADD_MAC_REQ,
+ IDPF_VPORT_DEL_MAC_REQ,
+ IDPF_VPORT_CONFIG_FLAGS_NBITS,
+};
+
+/**
+ * struct idpf_avail_queue_info
+ * @avail_rxq: Available RX queues
+ * @avail_txq: Available TX queues
+ * @avail_bufq: Available buffer queues
+ * @avail_complq: Available completion queues
+ *
+ * Maintain total queues available after allocating max queues to each vport.
+ */
+struct idpf_avail_queue_info {
+ u16 avail_rxq;
+ u16 avail_txq;
+ u16 avail_bufq;
+ u16 avail_complq;
+};
+
+/**
+ * struct idpf_vector_info - Utility structure to pass function arguments as a
+ * structure
+ * @num_req_vecs: Vectors required based on the number of queues updated by the
+ * user via ethtool
+ * @num_curr_vecs: Current number of vectors, must be >= @num_req_vecs
+ * @index: Relative starting index for vectors
+ * @default_vport: Vectors are for default vport
+ */
+struct idpf_vector_info {
+ u16 num_req_vecs;
+ u16 num_curr_vecs;
+ u16 index;
+ bool default_vport;
+};
+
+/**
+ * struct idpf_vector_lifo - Stack to maintain vector indexes used for vector
+ * distribution algorithm
+ * @top: Points to stack top i.e. next available vector index
+ * @base: Always points to start of the free pool
+ * @size: Total size of the vector stack
+ * @vec_idx: Array to store all the vector indexes
+ *
+ * Vector stack maintains all the relative vector indexes at the *adapter*
+ * level. This stack is divided into two parts: the 'default pool' and the
+ * 'free pool'. The vector distribution algorithm gives priority to default
+ * vports such that at least IDPF_MIN_Q_VEC vectors are allocated per default
+ * vport, and the relative vector indexes for those are maintained in the
+ * default pool. The free pool contains all the unallocated vector indexes,
+ * which can be allocated on demand. The mailbox vector index is maintained
+ * in the default pool of the stack.
+ */
+struct idpf_vector_lifo {
+ u16 top;
+ u16 base;
+ u16 size;
+ u16 *vec_idx;
+};
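As a rough sketch of how such a stack could be consumed (a hypothetical helper, assuming vec_idx[base..top) holds the free pool; the actual pop/push helpers live elsewhere in the driver and may differ):

	/* Hypothetical pop: take the most recently freed vector index off
	 * the top of the free pool; empty when top has fallen back to base.
	 */
	static int idpf_vec_lifo_pop_sketch(struct idpf_vector_lifo *stack)
	{
		if (stack->top == stack->base)
			return -ENOENT;	/* free pool exhausted */

		return stack->vec_idx[--stack->top];
	}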
+
+/**
+ * struct idpf_vport_config - Vport configuration data
+ * @user_config: see struct idpf_vport_user_config_data
+ * @max_q: Maximum possible queues
+ * @req_qs_chunks: Queue chunk data for requested queues
+ * @mac_filter_list_lock: Lock to protect mac filters
+ * @flags: See enum idpf_vport_config_flags
+ */
+struct idpf_vport_config {
+ struct idpf_vport_user_config_data user_config;
+ struct idpf_vport_max_q max_q;
+ void *req_qs_chunks;
+ spinlock_t mac_filter_list_lock;
+ DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
+};
+
+/**
+ * struct idpf_adapter - Device data struct generated on probe
+ * @pdev: PCI device struct given on probe
+ * @virt_ver_maj: Virtchnl version major
+ * @virt_ver_min: Virtchnl version minor
+ * @msg_enable: Debug message level enabled
+ * @mb_wait_count: Number of times mailbox initialization has been attempted
+ * @state: Init state machine
+ * @flags: See enum idpf_flags
+ * @reset_reg: See struct idpf_reset_reg
+ * @hw: Device access data
+ * @num_req_msix: Requested number of MSIX vectors
+ * @num_avail_msix: Available number of MSIX vectors
+ * @num_msix_entries: Number of entries in MSIX table
+ * @msix_entries: MSIX table
+ * @req_vec_chunks: Requested vector chunk data
+ * @mb_vector: Mailbox vector data
+ * @vector_stack: Stack to store the msix vector indexes
+ * @irq_mb_handler: Handler for hard interrupt for mailbox
+ * @tx_timeout_count: Number of TX timeouts that have occurred
+ * @avail_queues: Device given queue limits
+ * @vports: Array to store vports created by the driver
+ * @netdevs: Associated Vport netdevs
+ * @vport_params_reqd: Vport params requested
+ * @vport_params_recvd: Vport params received
+ * @vport_ids: Array of device given vport identifiers
+ * @vport_config: Vport config parameters
+ * @max_vports: Maximum vports that can be allocated
+ * @num_alloc_vports: Current number of vports allocated
+ * @next_vport: Next free slot in pf->vport[] - 0-based!
+ * @init_task: Initialization task
+ * @init_wq: Workqueue for initialization task
+ * @serv_task: Periodically recurring maintenance task
+ * @serv_wq: Workqueue for service task
+ * @mbx_task: Task to handle mailbox interrupts
+ * @mbx_wq: Workqueue for mailbox responses
+ * @vc_event_task: Task to handle out of band virtchnl event notifications
+ * @vc_event_wq: Workqueue for virtchnl events
+ * @stats_task: Periodic statistics retrieval task
+ * @stats_wq: Workqueue for statistics task
+ * @caps: Negotiated capabilities with device
+ * @vchnl_wq: Wait queue for virtchnl messages
+ * @vc_state: Virtchnl message state
+ * @vc_msg: Virtchnl message buffer
+ * @dev_ops: See idpf_dev_ops
+ * @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
+ * to VFs but is used to initialize them
+ * @crc_enable: Enable CRC insertion offload
+ * @req_tx_splitq: TX split or single queue model to request
+ * @req_rx_splitq: RX split or single queue model to request
+ * @vport_ctrl_lock: Lock to protect the vport control flow
+ * @vector_lock: Lock to protect vector distribution
+ * @queue_lock: Lock to protect queue distribution
+ * @vc_buf_lock: Lock to protect virtchnl buffer
+ */
+struct idpf_adapter {
+ struct pci_dev *pdev;
+ u32 virt_ver_maj;
+ u32 virt_ver_min;
+
+ u32 msg_enable;
+ u32 mb_wait_count;
+ enum idpf_state state;
+ DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
+ struct idpf_reset_reg reset_reg;
+ struct idpf_hw hw;
+ u16 num_req_msix;
+ u16 num_avail_msix;
+ u16 num_msix_entries;
+ struct msix_entry *msix_entries;
+ struct virtchnl2_alloc_vectors *req_vec_chunks;
+ struct idpf_q_vector mb_vector;
+ struct idpf_vector_lifo vector_stack;
+ irqreturn_t (*irq_mb_handler)(int irq, void *data);
+
+ u32 tx_timeout_count;
+ struct idpf_avail_queue_info avail_queues;
+ struct idpf_vport **vports;
+ struct net_device **netdevs;
+ struct virtchnl2_create_vport **vport_params_reqd;
+ struct virtchnl2_create_vport **vport_params_recvd;
+ u32 *vport_ids;
+
+ struct idpf_vport_config **vport_config;
+ u16 max_vports;
+ u16 num_alloc_vports;
+ u16 next_vport;
+
+ struct delayed_work init_task;
+ struct workqueue_struct *init_wq;
+ struct delayed_work serv_task;
+ struct workqueue_struct *serv_wq;
+ struct delayed_work mbx_task;
+ struct workqueue_struct *mbx_wq;
+ struct delayed_work vc_event_task;
+ struct workqueue_struct *vc_event_wq;
+ struct delayed_work stats_task;
+ struct workqueue_struct *stats_wq;
+ struct virtchnl2_get_capabilities caps;
+
+ wait_queue_head_t vchnl_wq;
+ DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
+ char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
+ struct idpf_dev_ops dev_ops;
+ int num_vfs;
+ bool crc_enable;
+ bool req_tx_splitq;
+ bool req_rx_splitq;
+
+ struct mutex vport_ctrl_lock;
+ struct mutex vector_lock;
+ struct mutex queue_lock;
+ struct mutex vc_buf_lock;
+};
+
+/**
+ * idpf_is_queue_model_split - check if queue model is split
+ * @q_model: queue model single or split
+ *
+ * Returns true if the queue model is split, false otherwise
+ */
+static inline int idpf_is_queue_model_split(u16 q_model)
+{
+ return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
+}
+
+#define idpf_is_cap_ena(adapter, field, flag) \
+ idpf_is_capability_ena(adapter, false, field, flag)
+#define idpf_is_cap_ena_all(adapter, field, flag) \
+ idpf_is_capability_ena(adapter, true, field, flag)
+
+bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
+ enum idpf_cap_field field, u64 flag);
+
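A usage sketch for the helpers above (assuming, from the names, that idpf_is_cap_ena() tests whether any of the given flags is set while idpf_is_cap_ena_all() requires all of them; the IDPF_CAP_* groupings follow below; not part of the patch):

	/* The field offset disambiguates which capability word to test,
	 * since the different cap sets reuse the same bit values.
	 */
	if (idpf_is_cap_ena(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
		; /* at least one RSS capability was negotiated */

	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
		; /* every listed Rx checksum capability is present */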
+#define IDPF_CAP_RSS (\
+	VIRTCHNL2_CAP_RSS_IPV4_TCP |\
+ VIRTCHNL2_CAP_RSS_IPV4_UDP |\
+ VIRTCHNL2_CAP_RSS_IPV4_SCTP |\
+ VIRTCHNL2_CAP_RSS_IPV4_OTHER |\
+	VIRTCHNL2_CAP_RSS_IPV6_TCP |\
+ VIRTCHNL2_CAP_RSS_IPV6_UDP |\
+ VIRTCHNL2_CAP_RSS_IPV6_SCTP |\
+ VIRTCHNL2_CAP_RSS_IPV6_OTHER)
+
+#define IDPF_CAP_RSC (\
+ VIRTCHNL2_CAP_RSC_IPV4_TCP |\
+ VIRTCHNL2_CAP_RSC_IPV6_TCP)
+
+#define IDPF_CAP_HSPLIT (\
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
+
+#define IDPF_CAP_RX_CSUM_L4V4 (\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)
+
+#define IDPF_CAP_RX_CSUM_L4V6 (\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+
+#define IDPF_CAP_RX_CSUM (\
+ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+
+#define IDPF_CAP_SCTP_CSUM (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)
+
+#define IDPF_CAP_TUNNEL_TX_CSUM (\
+ VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL)
+
+/**
+ * idpf_get_reserved_vecs - Get reserved vectors
+ * @adapter: private data struct
+ */
+static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.num_allocated_vectors);
+}
+
+/**
+ * idpf_get_default_vports - Get default number of vports
+ * @adapter: private data struct
+ */
+static inline u16 idpf_get_default_vports(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.default_num_vports);
+}
+
+/**
+ * idpf_get_max_vports - Get max number of vports
+ * @adapter: private data struct
+ */
+static inline u16 idpf_get_max_vports(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.max_vports);
+}
+
+/**
+ * idpf_get_max_tx_bufs - Get max scatter-gather buffers supported by the device
+ * @adapter: private data struct
+ */
+static inline unsigned int idpf_get_max_tx_bufs(struct idpf_adapter *adapter)
+{
+ return adapter->caps.max_sg_bufs_per_tx_pkt;
+}
+
+/**
+ * idpf_get_min_tx_pkt_len - Get min packet length supported by the device
+ * @adapter: private data struct
+ */
+static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
+{
+ u8 pkt_len = adapter->caps.min_sso_packet_len;
+
+ return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
+}
+
+/**
+ * idpf_get_reg_addr - Get BAR0 register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Based on the register offset, return the actual BAR0 register address
+ */
+static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
+ resource_size_t reg_offset)
+{
+ return (void __iomem *)(adapter->hw.hw_addr + reg_offset);
+}
+
+/**
+ * idpf_is_reset_detected - check if we were reset at some point
+ * @adapter: driver specific private structure
+ *
+ * Returns true if we are either in reset currently or were previously reset.
+ */
+static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
+{
+ if (!adapter->hw.arq)
+ return true;
+
+ return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) &
+ adapter->hw.arq->reg.len_mask);
+}
+
+/**
+ * idpf_is_reset_in_prog - check if reset is in progress
+ * @adapter: driver specific private structure
+ *
+ * Returns true if hard reset is in progress, false otherwise
+ */
+static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter)
+{
+ return (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags) ||
+ test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
+ test_bit(IDPF_HR_DRV_LOAD, adapter->flags));
+}
+
+/**
+ * idpf_netdev_to_vport - get a vport handle from a netdev
+ * @netdev: network interface device structure
+ */
+static inline struct idpf_vport *idpf_netdev_to_vport(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ return np->vport;
+}
+
+/**
+ * idpf_netdev_to_adapter - Get adapter handle from a netdev
+ * @netdev: Network interface device structure
+ */
+static inline struct idpf_adapter *idpf_netdev_to_adapter(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ return np->adapter;
+}
+
+/**
+ * idpf_is_feature_ena - Determine if a particular feature is enabled
+ * @vport: Vport to check
+ * @feature: Netdev flag to check
+ *
+ * Returns true if the given feature is enabled, false otherwise.
+ */
+static inline bool idpf_is_feature_ena(const struct idpf_vport *vport,
+ netdev_features_t feature)
+{
+ return vport->netdev->features & feature;
+}
+
+/**
+ * idpf_get_max_tx_hdr_size - Get the max TX header size
+ * @adapter: Driver specific private structure
+ */
+static inline u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.max_tx_hdr_size);
+}
+
+/**
+ * idpf_vport_ctrl_lock - Acquire the vport control lock
+ * @netdev: Network interface device structure
+ *
+ * This lock should be used by non-datapath code to protect against vport
+ * destruction.
+ */
+static inline void idpf_vport_ctrl_lock(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ mutex_lock(&np->adapter->vport_ctrl_lock);
+}
+
+/**
+ * idpf_vport_ctrl_unlock - Release the vport control lock
+ * @netdev: Network interface device structure
+ */
+static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ mutex_unlock(&np->adapter->vport_ctrl_lock);
+}
+
+void idpf_statistics_task(struct work_struct *work);
+void idpf_init_task(struct work_struct *work);
+void idpf_service_task(struct work_struct *work);
+void idpf_mbx_task(struct work_struct *work);
+void idpf_vc_event_task(struct work_struct *work);
+void idpf_dev_ops_init(struct idpf_adapter *adapter);
+void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
+int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
+void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
+int idpf_vc_core_init(struct idpf_adapter *adapter);
+void idpf_vc_core_deinit(struct idpf_adapter *adapter);
+int idpf_intr_req(struct idpf_adapter *adapter);
+void idpf_intr_rel(struct idpf_adapter *adapter);
+int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+ struct idpf_vec_regs *reg_vals);
+u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
+int idpf_send_delete_queues_msg(struct idpf_vport *vport);
+int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
+ u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
+int idpf_initiate_soft_reset(struct idpf_vport *vport,
+ enum idpf_vport_reset_cause reset_cause);
+int idpf_send_enable_vport_msg(struct idpf_vport *vport);
+int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
+int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
+int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
+int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
+void idpf_deinit_task(struct idpf_adapter *adapter);
+int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
+ u16 *q_vector_idxs,
+ struct idpf_vector_info *vec_info);
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_get_vec_ids(struct idpf_adapter *adapter,
+ u16 *vecids, int num_vecids,
+ struct virtchnl2_vector_chunks *chunks);
+int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
+ void *msg, int msg_size);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ u16 msg_size, u8 *msg);
+void idpf_set_ethtool_ops(struct net_device *netdev);
+int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ bool add, bool async);
+int idpf_set_promiscuous(struct idpf_adapter *adapter,
+ struct idpf_vport_user_config_data *config_data,
+ u32 vport_id);
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+u32 idpf_get_vport_id(struct idpf_vport *vport);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+int idpf_queue_reg_init(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_check_supported_desc_ids(struct idpf_vport *vport);
+void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
+ u16 itr, bool tx);
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
+int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
+int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
+
+#endif /* !_IDPF_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
new file mode 100644
index 000000000000..c7f43d2fcd13
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf_controlq.h"
+
+/**
+ * idpf_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
+ struct idpf_ctlq_create_info *q_create_info)
+{
+ /* set control queue registers in our local struct */
+ cq->reg.head = q_create_info->reg.head;
+ cq->reg.tail = q_create_info->reg.tail;
+ cq->reg.len = q_create_info->reg.len;
+ cq->reg.bah = q_create_info->reg.bah;
+ cq->reg.bal = q_create_info->reg.bal;
+ cq->reg.len_mask = q_create_info->reg.len_mask;
+ cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+ cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * idpf_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ bool is_rxq)
+{
+ /* Update tail to post pre-allocated buffers for rx queues */
+ if (is_rxq)
+ wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL needs to be set */
+ if (cq->q_id != -1)
+ return;
+
+ /* Clear Head for both send or receive */
+ wr32(hw, cq->reg.head, 0);
+
+ /* set starting point */
+ wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
+ wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
+ wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+ int i;
+
+ for (i = 0; i < cq->ring_size; i++) {
+ struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+ struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+ /* No buffer to post to descriptor, continue */
+ if (!bi)
+ continue;
+
+ desc->flags =
+ cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+ desc->opcode = 0;
+ desc->datalen = cpu_to_le16(bi->size);
+ desc->ret_val = 0;
+ desc->v_opcode_dtype = 0;
+ desc->v_retval = 0;
+ desc->params.indirect.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.indirect.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.indirect.param0 = 0;
+ desc->params.indirect.sw_cookie = 0;
+ desc->params.indirect.v_flags = 0;
+ }
+}
+
+/**
+ * idpf_ctlq_shutdown - shutdown the CQ
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for any control queue
+ */
+static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ mutex_lock(&cq->cq_lock);
+
+ /* free ring buffers and the ring itself */
+ idpf_ctlq_dealloc_ring_res(hw, cq);
+
+ /* Set ring_size to 0 to indicate uninitialized queue */
+ cq->ring_size = 0;
+
+ mutex_unlock(&cq->cq_lock);
+ mutex_destroy(&cq->cq_lock);
+}
+
+/**
+ * idpf_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ *
+ * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
+ */
+int idpf_ctlq_add(struct idpf_hw *hw,
+ struct idpf_ctlq_create_info *qinfo,
+ struct idpf_ctlq_info **cq_out)
+{
+ struct idpf_ctlq_info *cq;
+ bool is_rxq = false;
+ int err;
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return -ENOMEM;
+
+ cq->cq_type = qinfo->type;
+ cq->q_id = qinfo->id;
+ cq->buf_size = qinfo->buf_size;
+ cq->ring_size = qinfo->len;
+
+ cq->next_to_use = 0;
+ cq->next_to_clean = 0;
+ cq->next_to_post = cq->ring_size - 1;
+
+ switch (qinfo->type) {
+ case IDPF_CTLQ_TYPE_MAILBOX_RX:
+ is_rxq = true;
+ fallthrough;
+ case IDPF_CTLQ_TYPE_MAILBOX_TX:
+ err = idpf_ctlq_alloc_ring_res(hw, cq);
+ break;
+ default:
+ err = -EBADR;
+ break;
+ }
+
+ if (err)
+ goto init_free_q;
+
+ if (is_rxq) {
+ idpf_ctlq_init_rxq_bufs(cq);
+ } else {
+ /* Allocate the array of msg pointers for TX queues */
+ cq->bi.tx_msg = kcalloc(qinfo->len,
+ sizeof(struct idpf_ctlq_msg *),
+ GFP_KERNEL);
+ if (!cq->bi.tx_msg) {
+ err = -ENOMEM;
+ goto init_dealloc_q_mem;
+ }
+ }
+
+ idpf_ctlq_setup_regs(cq, qinfo);
+
+ idpf_ctlq_init_regs(hw, cq, is_rxq);
+
+ mutex_init(&cq->cq_lock);
+
+ list_add(&cq->cq_list, &hw->cq_list_head);
+
+ *cq_out = cq;
+
+ return 0;
+
+init_dealloc_q_mem:
+ /* free ring buffers and the ring itself */
+ idpf_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+ kfree(cq);
+
+ return err;
+}
+
+/**
+ * idpf_ctlq_remove - deallocate and remove specified control queue
+ * @hw: pointer to hardware struct
+ * @cq: pointer to control queue to be removed
+ */
+void idpf_ctlq_remove(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq)
+{
+ list_del(&cq->cq_list);
+ idpf_ctlq_shutdown(hw, cq);
+ kfree(cq);
+}
+
+/**
+ * idpf_ctlq_init - main initialization routine for all control queues
+ * @hw: pointer to hardware struct
+ * @num_q: number of queues to initialize
+ * @q_info: array of structs containing info for each queue to be initialized
+ *
+ * This initializes any number and any type of control queues. This is an all
+ * or nothing routine; if one fails, all previously allocated queues will be
+ * destroyed. This must be called prior to using the individual add/remove
+ * APIs.
+ */
+int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
+ struct idpf_ctlq_create_info *q_info)
+{
+ struct idpf_ctlq_info *cq, *tmp;
+ int err;
+ int i;
+
+ INIT_LIST_HEAD(&hw->cq_list_head);
+
+ for (i = 0; i < num_q; i++) {
+ struct idpf_ctlq_create_info *qinfo = q_info + i;
+
+ err = idpf_ctlq_add(hw, qinfo, &cq);
+ if (err)
+ goto init_destroy_qs;
+ }
+
+ return 0;
+
+init_destroy_qs:
+ list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
+ idpf_ctlq_remove(hw, cq);
+
+ return err;
+}
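A caller-side sketch of the required ordering for the default mailbox pair (field values are illustrative; register setup is omitted, and this is not part of the patch):

	/* idpf_ctlq_init() must run before idpf_ctlq_add()/remove(); on
	 * failure it has already destroyed any partially created queues.
	 */
	struct idpf_ctlq_create_info qinfo[IDPF_NUM_DFLT_MBX_Q] = {
		{ .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = IDPF_DFLT_MBX_ID,
		  .len = IDPF_DFLT_MBX_Q_LEN, .buf_size = IDPF_CTLQ_MAX_BUF_LEN },
		{ .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = IDPF_DFLT_MBX_ID,
		  .len = IDPF_DFLT_MBX_Q_LEN, .buf_size = IDPF_CTLQ_MAX_BUF_LEN },
	};

	if (idpf_ctlq_init(&adapter->hw, IDPF_NUM_DFLT_MBX_Q, qinfo))
		; /* bail out: nothing left to clean up */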
+
+/**
+ * idpf_ctlq_deinit - destroy all control queues
+ * @hw: pointer to hw struct
+ */
+void idpf_ctlq_deinit(struct idpf_hw *hw)
+{
+ struct idpf_ctlq_info *cq, *tmp;
+
+ list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
+ idpf_ctlq_remove(hw, cq);
+}
+
+/**
+ * idpf_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+ struct idpf_ctlq_desc *desc;
+ int num_desc_avail;
+ int err = 0;
+ int i;
+
+ mutex_lock(&cq->cq_lock);
+
+ /* Ensure there are enough descriptors to send all messages */
+ num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+ if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+ err = -ENOSPC;
+ goto err_unlock;
+ }
+
+ for (i = 0; i < num_q_msg; i++) {
+ struct idpf_ctlq_msg *msg = &q_msg[i];
+
+ desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+
+ desc->opcode = cpu_to_le16(msg->opcode);
+ desc->pfid_vfid = cpu_to_le16(msg->func_id);
+
+ desc->v_opcode_dtype = cpu_to_le32(msg->cookie.mbx.chnl_opcode);
+ desc->v_retval = cpu_to_le32(msg->cookie.mbx.chnl_retval);
+
+ desc->flags = cpu_to_le16((msg->host_id & IDPF_HOST_ID_MASK) <<
+ IDPF_CTLQ_FLAG_HOST_ID_S);
+ if (msg->data_len) {
+ struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+ desc->datalen |= cpu_to_le16(msg->data_len);
+ desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_BUF);
+ desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_RD);
+
+ /* Update the address values in the desc with the pa
+ * value for respective buffer
+ */
+ desc->params.indirect.addr_high =
+ cpu_to_le32(upper_32_bits(buff->pa));
+ desc->params.indirect.addr_low =
+ cpu_to_le32(lower_32_bits(buff->pa));
+
+ memcpy(&desc->params, msg->ctx.indirect.context,
+ IDPF_INDIRECT_CTX_SIZE);
+ } else {
+ memcpy(&desc->params, msg->ctx.direct,
+ IDPF_DIRECT_CTX_SIZE);
+ }
+
+ /* Store buffer info */
+ cq->bi.tx_msg[cq->next_to_use] = msg;
+
+ (cq->next_to_use)++;
+ if (cq->next_to_use == cq->ring_size)
+ cq->next_to_use = 0;
+ }
+
+ /* Force memory write to complete before letting hardware
+ * know that there are new descriptors to fetch.
+ */
+ dma_wmb();
+
+ wr32(hw, cq->reg.tail, cq->next_to_use);
+
+err_unlock:
+ mutex_unlock(&cq->cq_lock);
+
+ return err;
+}
+
+/**
+ * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors. The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+ struct idpf_ctlq_msg *msg_status[])
+{
+ struct idpf_ctlq_desc *desc;
+ u16 i, num_to_clean;
+ u16 ntc, desc_err;
+
+ if (*clean_count == 0)
+ return 0;
+ if (*clean_count > cq->ring_size)
+ return -EBADR;
+
+ mutex_lock(&cq->cq_lock);
+
+ ntc = cq->next_to_clean;
+
+ num_to_clean = *clean_count;
+
+ for (i = 0; i < num_to_clean; i++) {
+ /* Fetch next descriptor and check if marked as done */
+ desc = IDPF_CTLQ_DESC(cq, ntc);
+ if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
+ break;
+
+ /* strip off FW internal code */
+ desc_err = le16_to_cpu(desc->ret_val) & 0xff;
+
+ msg_status[i] = cq->bi.tx_msg[ntc];
+ msg_status[i]->status = desc_err;
+
+ cq->bi.tx_msg[ntc] = NULL;
+
+ /* Zero out any stale data */
+ memset(desc, 0, sizeof(*desc));
+
+ ntc++;
+ if (ntc == cq->ring_size)
+ ntc = 0;
+ }
+
+ cq->next_to_clean = ntc;
+
+ mutex_unlock(&cq->cq_lock);
+
+ /* Return number of descriptors actually cleaned */
+ *clean_count = i;
+
+ return 0;
+}
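A hedged caller-side sketch of the send/clean contract described above (hypothetical wrapper; real callers wait for hardware write-back between the two steps and manage the DMA buffers as noted):

	/* Send one message on the send queue, then reap its completion and
	 * report a non-zero descriptor status as an error.
	 */
	static int idpf_mbx_send_one_sketch(struct idpf_hw *hw,
					    struct idpf_ctlq_info *asq,
					    struct idpf_ctlq_msg *msg)
	{
		struct idpf_ctlq_msg *done[1];
		u16 nclean = 1;
		int err;

		err = idpf_ctlq_send(hw, asq, 1, msg);
		if (err)
			return err;

		/* ... wait/poll for the DD flag to be written back ... */

		err = idpf_ctlq_clean_sq(asq, &nclean, done);
		if (err)
			return err;

		return (nclean && done[0]->status) ? -EIO : 0;
	}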
+
+/**
+ * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. with buff_count = 0 and
+ * buffs = NULL, to support direct commands
+ */
+int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ u16 *buff_count, struct idpf_dma_mem **buffs)
+{
+ struct idpf_ctlq_desc *desc;
+ u16 ntp = cq->next_to_post;
+ bool buffs_avail = false;
+ u16 tbp = ntp + 1;
+ int i = 0;
+
+ if (*buff_count > cq->ring_size)
+ return -EBADR;
+
+ if (*buff_count > 0)
+ buffs_avail = true;
+
+ mutex_lock(&cq->cq_lock);
+
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+
+ if (tbp == cq->next_to_clean)
+ /* Nothing to do */
+ goto post_buffs_out;
+
+ /* Post buffers for as many as provided or up until the last one used */
+ while (ntp != cq->next_to_clean) {
+ desc = IDPF_CTLQ_DESC(cq, ntp);
+
+ if (cq->bi.rx_buff[ntp])
+ goto fill_desc;
+ if (!buffs_avail) {
+ /* If the caller hasn't given us any buffers or
+ * there are none left, search the ring itself
+ * for an available buffer to move to this
+ * entry starting at the next entry in the ring
+ */
+ tbp = ntp + 1;
+
+ /* Wrap ring if necessary */
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+
+ while (tbp != cq->next_to_clean) {
+ if (cq->bi.rx_buff[tbp]) {
+ cq->bi.rx_buff[ntp] =
+ cq->bi.rx_buff[tbp];
+ cq->bi.rx_buff[tbp] = NULL;
+
+ /* Found a buffer, no need to
+ * search anymore
+ */
+ break;
+ }
+
+ /* Wrap ring if necessary */
+ tbp++;
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+ }
+
+ if (tbp == cq->next_to_clean)
+ goto post_buffs_out;
+ } else {
+ /* Give back pointer to DMA buffer */
+ cq->bi.rx_buff[ntp] = buffs[i];
+ i++;
+
+ if (i >= *buff_count)
+ buffs_avail = false;
+ }
+
+fill_desc:
+ desc->flags =
+ cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+ /* Post buffers to descriptor */
+ desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
+ desc->params.indirect.addr_high =
+ cpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa));
+ desc->params.indirect.addr_low =
+ cpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa));
+
+ ntp++;
+ if (ntp == cq->ring_size)
+ ntp = 0;
+ }
+
+post_buffs_out:
+ /* Only update tail if buffers were actually posted */
+ if (cq->next_to_post != ntp) {
+ if (ntp)
+ /* Update next_to_post to ntp - 1 since current ntp
+ * will not have a buffer
+ */
+ cq->next_to_post = ntp - 1;
+ else
+ /* Wrap to end of ring since current ntp is 0 */
+ cq->next_to_post = cq->ring_size - 1;
+
+ wr32(hw, cq->reg.tail, cq->next_to_post);
+ }
+
+ mutex_unlock(&cq->cq_lock);
+
+ /* return the number of buffers that were not posted */
+ *buff_count = *buff_count - i;
+
+ return 0;
+}
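+
+/* Usage sketch (hypothetical, for illustration): per the note above, this is
+ * also called with no buffers after processing a direct command, so that the
+ * ring is still replenished from buffers already held on it:
+ *
+ *    u16 nbufs = 0;
+ *
+ *    err = idpf_ctlq_post_rx_buffs(hw, arq, &nbufs, NULL);
+ *
+ * On return, *buff_count holds the number of buffers that were not posted;
+ * zero means everything handed in was given to hardware.
+ */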
+
+/**
+ * idpf_ctlq_recv - receive control queue messages
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ struct idpf_ctlq_msg *q_msg)
+{
+ u16 num_to_clean, ntc, flags;
+ struct idpf_ctlq_desc *desc;
+ int err = 0;
+ u16 i;
+
+ if (*num_q_msg == 0)
+ return 0;
+ else if (*num_q_msg > cq->ring_size)
+ return -EBADR;
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&cq->cq_lock);
+
+ ntc = cq->next_to_clean;
+
+ num_to_clean = *num_q_msg;
+
+ for (i = 0; i < num_to_clean; i++) {
+ /* Fetch next descriptor and check if marked as done */
+ desc = IDPF_CTLQ_DESC(cq, ntc);
+ flags = le16_to_cpu(desc->flags);
+
+ if (!(flags & IDPF_CTLQ_FLAG_DD))
+ break;
+
+ q_msg[i].vmvf_type = (flags &
+ (IDPF_CTLQ_FLAG_FTYPE_VM |
+ IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+ IDPF_CTLQ_FLAG_FTYPE_S;
+
+ if (flags & IDPF_CTLQ_FLAG_ERR)
+ err = -EBADMSG;
+
+ q_msg[i].cookie.mbx.chnl_opcode =
+ le32_to_cpu(desc->v_opcode_dtype);
+ q_msg[i].cookie.mbx.chnl_retval =
+ le32_to_cpu(desc->v_retval);
+
+ q_msg[i].opcode = le16_to_cpu(desc->opcode);
+ q_msg[i].data_len = le16_to_cpu(desc->datalen);
+ q_msg[i].status = le16_to_cpu(desc->ret_val);
+
+ if (desc->datalen) {
+ memcpy(q_msg[i].ctx.indirect.context,
+ &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE);
+
+ /* Assign pointer to dma buffer to ctlq_msg array
+ * to be given to upper layer
+ */
+ q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+ /* Zero out pointer to DMA buffer info;
+ * will be repopulated by post buffers API
+ */
+ cq->bi.rx_buff[ntc] = NULL;
+ } else {
+ memcpy(q_msg[i].ctx.direct, desc->params.raw,
+ IDPF_DIRECT_CTX_SIZE);
+ }
+
+ /* Zero out stale data in descriptor */
+ memset(desc, 0, sizeof(struct idpf_ctlq_desc));
+
+ ntc++;
+ if (ntc == cq->ring_size)
+ ntc = 0;
+ }
+
+ cq->next_to_clean = ntc;
+
+ mutex_unlock(&cq->cq_lock);
+
+ *num_q_msg = i;
+ if (*num_q_msg == 0)
+ err = -ENOMSG;
+
+ return err;
+}
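+
+/* Usage sketch (hypothetical polling loop, for illustration only): receive
+ * pending messages, collect the indirect payload buffers, then hand the
+ * buffers back to the ring. Message processing is elided, and a payload
+ * pointer is only valid when data_len is non-zero:
+ *
+ *    struct idpf_dma_mem *bufs[8];
+ *    struct idpf_ctlq_msg msg[8];
+ *    u16 i, nmsg = ARRAY_SIZE(msg), nbufs = 0;
+ *
+ *    err = idpf_ctlq_recv(arq, &nmsg, msg);
+ *    for (i = 0; !err && i < nmsg; i++) {
+ *        ... process msg[i] ...
+ *        if (msg[i].data_len)
+ *            bufs[nbufs++] = msg[i].ctx.indirect.payload;
+ *    }
+ *    err = idpf_ctlq_post_rx_buffs(hw, arq, &nbufs, bufs);
+ */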
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.h b/drivers/net/ethernet/intel/idpf/idpf_controlq.h
new file mode 100644
index 000000000000..c1aba09e9856
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_CONTROLQ_H_
+#define _IDPF_CONTROLQ_H_
+
+#include <linux/slab.h>
+
+#include "idpf_controlq_api.h"
+
+/* Maximum buffer length for all control queue types */
+#define IDPF_CTLQ_MAX_BUF_LEN 4096
+
+#define IDPF_CTLQ_DESC(R, i) \
+ (&(((struct idpf_ctlq_desc *)((R)->desc_ring.va))[i]))
+
+#define IDPF_CTLQ_DESC_UNUSED(R) \
+ ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->ring_size) + \
+ (R)->next_to_clean - (R)->next_to_use - 1))
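+
+/* For illustration: with ring_size = 64, next_to_use = 10 and
+ * next_to_clean = 4, IDPF_CTLQ_DESC_UNUSED() yields 64 + 4 - 10 - 1 = 57
+ * free descriptors. One slot is always kept unused so that a full ring can
+ * be distinguished from an empty one.
+ */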
+
+/* Control Queue default settings */
+#define IDPF_CTRL_SQ_CMD_TIMEOUT 250 /* msecs */
+
+struct idpf_ctlq_desc {
+ /* Control queue descriptor flags */
+ __le16 flags;
+ /* Control queue message opcode */
+ __le16 opcode;
+ __le16 datalen; /* 0 for direct commands */
+ union {
+ __le16 ret_val;
+ __le16 pfid_vfid;
+#define IDPF_CTLQ_DESC_VF_ID_S 0
+#define IDPF_CTLQ_DESC_VF_ID_M (0x7FF << IDPF_CTLQ_DESC_VF_ID_S)
+#define IDPF_CTLQ_DESC_PF_ID_S 11
+#define IDPF_CTLQ_DESC_PF_ID_M (0x1F << IDPF_CTLQ_DESC_PF_ID_S)
+ };
+
+ /* Virtchnl message opcode and virtchnl descriptor type
+ * v_opcode=[27:0], v_dtype=[31:28]
+ */
+ __le32 v_opcode_dtype;
+ /* Virtchnl return value */
+ __le32 v_retval;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } direct;
+ struct {
+ __le32 param0;
+ __le16 sw_cookie;
+ /* Virtchnl flags */
+ __le16 v_flags;
+ __le32 addr_high;
+ __le32 addr_low;
+ } indirect;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR| * RSV * |FTYPE | *RSV* |RD |VFC|BUF| HOST_ID |
+ */
+/* command flags and offsets */
+#define IDPF_CTLQ_FLAG_DD_S 0
+#define IDPF_CTLQ_FLAG_CMP_S 1
+#define IDPF_CTLQ_FLAG_ERR_S 2
+#define IDPF_CTLQ_FLAG_FTYPE_S 6
+#define IDPF_CTLQ_FLAG_RD_S 10
+#define IDPF_CTLQ_FLAG_VFC_S 11
+#define IDPF_CTLQ_FLAG_BUF_S 12
+#define IDPF_CTLQ_FLAG_HOST_ID_S 13
+
+#define IDPF_CTLQ_FLAG_DD BIT(IDPF_CTLQ_FLAG_DD_S) /* 0x1 */
+#define IDPF_CTLQ_FLAG_CMP BIT(IDPF_CTLQ_FLAG_CMP_S) /* 0x2 */
+#define IDPF_CTLQ_FLAG_ERR BIT(IDPF_CTLQ_FLAG_ERR_S) /* 0x4 */
+#define IDPF_CTLQ_FLAG_FTYPE_VM BIT(IDPF_CTLQ_FLAG_FTYPE_S) /* 0x40 */
+#define IDPF_CTLQ_FLAG_FTYPE_PF BIT(IDPF_CTLQ_FLAG_FTYPE_S + 1) /* 0x80 */
+#define IDPF_CTLQ_FLAG_RD BIT(IDPF_CTLQ_FLAG_RD_S) /* 0x400 */
+#define IDPF_CTLQ_FLAG_VFC BIT(IDPF_CTLQ_FLAG_VFC_S) /* 0x800 */
+#define IDPF_CTLQ_FLAG_BUF BIT(IDPF_CTLQ_FLAG_BUF_S) /* 0x1000 */
+
+/* Host ID is a special 3-bit field, not a 1-bit flag */
+#define IDPF_CTLQ_FLAG_HOST_ID_M MAKE_MASK(0x7000UL, IDPF_CTLQ_FLAG_HOST_ID_S)
+
+struct idpf_mbxq_desc {
+ u8 pad[8]; /* CTLQ flags/opcode/len/retval fields */
+ u32 chnl_opcode; /* avoid confusion with desc->opcode */
+ u32 chnl_retval; /* ditto for desc->retval */
+ u32 pf_vf_id; /* used by CP when sending to PF */
+};
+
+/* Define the driver hardware struct to replace other control structs as needed
+ * Align to ctlq_hw_info
+ */
+struct idpf_hw {
+ void __iomem *hw_addr;
+ resource_size_t hw_addr_len;
+
+ struct idpf_adapter *back;
+
+ /* control queue - send and receive */
+ struct idpf_ctlq_info *asq;
+ struct idpf_ctlq_info *arq;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ bool adapter_stopped;
+
+ struct list_head cq_list_head;
+};
+
+int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq);
+
+void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+
+/* prototype for functions used for dynamic memory allocation */
+void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem,
+ u64 size);
+void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem);
+#endif /* _IDPF_CONTROLQ_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
new file mode 100644
index 000000000000..8dee098bbfb0
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_CONTROLQ_API_H_
+#define _IDPF_CONTROLQ_API_H_
+
+#include "idpf_mem.h"
+
+struct idpf_hw;
+
+/* Used for queue init, response and events */
+enum idpf_ctlq_type {
+ IDPF_CTLQ_TYPE_MAILBOX_TX = 0,
+ IDPF_CTLQ_TYPE_MAILBOX_RX = 1,
+ IDPF_CTLQ_TYPE_CONFIG_TX = 2,
+ IDPF_CTLQ_TYPE_CONFIG_RX = 3,
+ IDPF_CTLQ_TYPE_EVENT_RX = 4,
+ IDPF_CTLQ_TYPE_RDMA_TX = 5,
+ IDPF_CTLQ_TYPE_RDMA_RX = 6,
+ IDPF_CTLQ_TYPE_RDMA_COMPL = 7
+};
+
+/* Generic Control Queue Structures */
+struct idpf_ctlq_reg {
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ /* Below applies only to default mb (if present) */
+ u32 len;
+ u32 bah;
+ u32 bal;
+ u32 len_mask;
+ u32 len_ena_mask;
+ u32 head_mask;
+};
+
+/* Generic queue msg structure */
+struct idpf_ctlq_msg {
+ u8 vmvf_type; /* represents the source of the message on recv */
+#define IDPF_VMVF_TYPE_VF 0
+#define IDPF_VMVF_TYPE_VM 1
+#define IDPF_VMVF_TYPE_PF 2
+ u8 host_id;
+ /* 3b field used only when sending a message to CP - to be used in
+ * combination with target func_id to route the message
+ */
+#define IDPF_HOST_ID_MASK 0x7
+
+ u16 opcode;
+ u16 data_len; /* data_len = 0 when no payload is attached */
+ union {
+ u16 func_id; /* when sending a message */
+ u16 status; /* when receiving a message */
+ };
+ union {
+ struct {
+ u32 chnl_opcode;
+ u32 chnl_retval;
+ } mbx;
+ } cookie;
+ union {
+#define IDPF_DIRECT_CTX_SIZE 16
+#define IDPF_INDIRECT_CTX_SIZE 8
+ /* 16 bytes of context can be provided or 8 bytes of context
+ * plus the address of a DMA buffer
+ */
+ u8 direct[IDPF_DIRECT_CTX_SIZE];
+ struct {
+ u8 context[IDPF_INDIRECT_CTX_SIZE];
+ struct idpf_dma_mem *payload;
+ } indirect;
+ } ctx;
+};
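+
+/* Usage sketch (hypothetical, for illustration): a mailbox send with an
+ * indirect payload could be filled in as below; virtchnl_op, payload_size
+ * and dma_buf are placeholders, and the opcode is the value from
+ * enum idpf_mbx_opc further down:
+ *
+ *    struct idpf_ctlq_msg msg = { };
+ *
+ *    msg.opcode = idpf_mbq_opc_send_msg_to_cp;
+ *    msg.data_len = payload_size;
+ *    msg.cookie.mbx.chnl_opcode = virtchnl_op;
+ *    msg.ctx.indirect.payload = dma_buf;
+ *
+ * A direct command instead leaves data_len at 0 and places up to
+ * IDPF_DIRECT_CTX_SIZE bytes of context in msg.ctx.direct.
+ */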
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct idpf_ctlq_create_info {
+ enum idpf_ctlq_type type;
+ int id; /* absolute queue offset passed as input;
+ * -1 for default mailbox if present
+ */
+ u16 len; /* Queue length passed as input */
+ u16 buf_size; /* buffer size passed as input */
+ u64 base_address; /* output, HPA of the Queue start */
+ struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+
+ int ext_info_size;
+ void *ext_info; /* Specific to q type */
+};
+
+/* Control Queue information */
+struct idpf_ctlq_info {
+ struct list_head cq_list;
+
+ enum idpf_ctlq_type cq_type;
+ int q_id;
+ struct mutex cq_lock; /* control queue lock */
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_post; /* starting descriptor to post buffers
+ * to after a receive
+ */
+
+ struct idpf_dma_mem desc_ring; /* descriptor ring memory
+ * idpf_dma_mem is defined in idpf_mem.h
+ */
+ union {
+ struct idpf_dma_mem **rx_buff;
+ struct idpf_ctlq_msg **tx_msg;
+ } bi;
+
+ u16 buf_size; /* queue buffer size */
+ u16 ring_size; /* Number of descriptors */
+ struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+};
+
+/**
+ * enum idpf_mbx_opc - PF/VF mailbox commands
+ * @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP
+ */
+enum idpf_mbx_opc {
+ idpf_mbq_opc_send_msg_to_cp = 0x0801,
+};
+
+/* API supported for control queue management */
+/* Will init all required queues, including the default mailbox. "q_info" is
+ * an array of create_info structs equal to the number of control queues to
+ * be created.
+ */
+int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
+ struct idpf_ctlq_create_info *q_info);
+
+/* Allocate and initialize a single control queue, which will be added to the
+ * control queue list; returns a handle to the created control queue
+ */
+int idpf_ctlq_add(struct idpf_hw *hw,
+ struct idpf_ctlq_create_info *qinfo,
+ struct idpf_ctlq_info **cq);
+
+/* Deinitialize and deallocate a single control queue */
+void idpf_ctlq_remove(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq);
+
+/* Sends messages to HW and will also free the buffer */
+int idpf_ctlq_send(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq,
+ u16 num_q_msg,
+ struct idpf_ctlq_msg q_msg[]);
+
+/* Receives messages; called by the interrupt handler or by polling
+ * initiated by an app/process. The caller is expected to free the buffers
+ */
+int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ struct idpf_ctlq_msg *q_msg);
+
+/* Reclaims send descriptors on HW write back */
+int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+ struct idpf_ctlq_msg *msg_status[]);
+
+/* Indicate RX buffers are done being processed */
+int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq,
+ u16 *buff_count,
+ struct idpf_dma_mem **buffs);
+
+/* Will destroy all queues, including the default mailbox */
+void idpf_ctlq_deinit(struct idpf_hw *hw);
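+
+/* Usage sketch (hypothetical init path, for illustration only): a typical
+ * lifecycle creates the two default mailbox queues, exchanges messages and
+ * tears everything down. Sizes are illustrative, and the register fields in
+ * each entry's .reg must also be filled in (see the ctlq_reg_init callback
+ * in idpf_dev.c):
+ *
+ *    struct idpf_ctlq_create_info qinfo[2] = {
+ *        { .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = -1,
+ *          .len = 64, .buf_size = 4096 },
+ *        { .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = -1,
+ *          .len = 64, .buf_size = 4096 },
+ *    };
+ *
+ *    err = idpf_ctlq_init(hw, 2, qinfo);
+ *    ... idpf_ctlq_send() / idpf_ctlq_recv() as needed ...
+ *    idpf_ctlq_deinit(hw);
+ */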
+
+#endif /* _IDPF_CONTROLQ_API_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c b/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c
new file mode 100644
index 000000000000..a942a6385d06
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf_controlq.h"
+
+/**
+ * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ */
+static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq)
+{
+ size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);
+
+ cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
+ if (!cq->desc_ring.va)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Allocate the buffer head for all control queues, and if it's a receive
+ * queue, allocate DMA buffers
+ */
+static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq)
+{
+ int i;
+
+ /* Do not allocate DMA buffers for transmit queues */
+ if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
+ return 0;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+ cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *),
+ GFP_KERNEL);
+ if (!cq->bi.rx_buff)
+ return -ENOMEM;
+
+ /* allocate the mapped buffers (except for the last one) */
+ for (i = 0; i < cq->ring_size - 1; i++) {
+ struct idpf_dma_mem *bi;
+ int num = 1; /* number of idpf_dma_mem to be allocated */
+
+ cq->bi.rx_buff[i] = kcalloc(num, sizeof(struct idpf_dma_mem),
+ GFP_KERNEL);
+ if (!cq->bi.rx_buff[i])
+ goto unwind_alloc_cq_bufs;
+
+ bi = cq->bi.rx_buff[i];
+
+ bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
+ if (!bi->va) {
+ /* unwind will not free the failed entry */
+ kfree(cq->bi.rx_buff[i]);
+ goto unwind_alloc_cq_bufs;
+ }
+ }
+
+ return 0;
+
+unwind_alloc_cq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--) {
+ idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
+ kfree(cq->bi.rx_buff[i]);
+ }
+ kfree(cq->bi.rx_buff);
+
+ return -ENOMEM;
+}
+
+/**
+ * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ */
+static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq)
+{
+ idpf_free_dma_mem(hw, &cq->desc_ring);
+}
+
+/**
+ * idpf_ctlq_free_bufs - Free CQ buffer info elements
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
+ * queues. The upper layers are expected to manage freeing of TX DMA buffers
+ */
+static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ void *bi;
+
+ if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
+ int i;
+
+ /* free DMA buffers for rx queues */
+ for (i = 0; i < cq->ring_size; i++) {
+ if (cq->bi.rx_buff[i]) {
+ idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
+ kfree(cq->bi.rx_buff[i]);
+ }
+ }
+
+ bi = (void *)cq->bi.rx_buff;
+ } else {
+ bi = (void *)cq->bi.tx_msg;
+ }
+
+ /* free the buffer header */
+ kfree(bi);
+}
+
+/**
+ * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Free the memory used by the ring, buffers and other related structures
+ */
+void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ /* free ring buffers and the ring itself */
+ idpf_ctlq_free_bufs(hw, cq);
+ idpf_ctlq_free_desc_ring(hw, cq);
+}
+
+/**
+ * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ *
+ * Do *NOT* hold cq_lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ */
+int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ int err;
+
+ /* allocate the ring memory */
+ err = idpf_ctlq_alloc_desc_ring(hw, cq);
+ if (err)
+ return err;
+
+ /* allocate buffers in the rings */
+ err = idpf_ctlq_alloc_bufs(hw, cq);
+ if (err)
+ goto idpf_init_cq_free_ring;
+
+ /* success! */
+ return 0;
+
+idpf_init_cq_free_ring:
+ idpf_free_dma_mem(hw, &cq->desc_ring);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
new file mode 100644
index 000000000000..34ad1ac46b78
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_lan_pf_regs.h"
+
+#define IDPF_PF_ITR_IDX_SPACING 0x4
+
+/**
+ * idpf_ctlq_reg_init - initialize default mailbox registers
+ * @cq: pointer to the array of create control queues
+ */
+static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+{
+ int i;
+
+ for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
+ struct idpf_ctlq_create_info *ccq = cq + i;
+
+ switch (ccq->type) {
+ case IDPF_CTLQ_TYPE_MAILBOX_TX:
+ /* set head and tail registers in our local struct */
+ ccq->reg.head = PF_FW_ATQH;
+ ccq->reg.tail = PF_FW_ATQT;
+ ccq->reg.len = PF_FW_ATQLEN;
+ ccq->reg.bah = PF_FW_ATQBAH;
+ ccq->reg.bal = PF_FW_ATQBAL;
+ ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M;
+ ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
+ ccq->reg.head_mask = PF_FW_ATQH_ATQH_M;
+ break;
+ case IDPF_CTLQ_TYPE_MAILBOX_RX:
+ /* set head and tail registers in our local struct */
+ ccq->reg.head = PF_FW_ARQH;
+ ccq->reg.tail = PF_FW_ARQT;
+ ccq->reg.len = PF_FW_ARQLEN;
+ ccq->reg.bah = PF_FW_ARQBAH;
+ ccq->reg.bal = PF_FW_ARQBAL;
+ ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M;
+ ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
+ ccq->reg.head_mask = PF_FW_ARQH_ARQH_M;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ * idpf_mb_intr_reg_init - Initialize mailbox interrupt register
+ * @adapter: adapter structure
+ */
+static void idpf_mb_intr_reg_init(struct idpf_adapter *adapter)
+{
+ struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
+ u32 dyn_ctl = le32_to_cpu(adapter->caps.mailbox_dyn_ctl);
+
+ intr->dyn_ctl = idpf_get_reg_addr(adapter, dyn_ctl);
+ intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
+ intr->dyn_ctl_itridx_m = PF_GLINT_DYN_CTL_ITR_INDX_M;
+ intr->icr_ena = idpf_get_reg_addr(adapter, PF_INT_DIR_OICR_ENA);
+ intr->icr_ena_ctlq_m = PF_INT_DIR_OICR_ENA_M;
+}
+
+/**
+ * idpf_intr_reg_init - Initialize interrupt registers
+ * @vport: virtual port structure
+ */
+static int idpf_intr_reg_init(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ int num_vecs = vport->num_q_vectors;
+ struct idpf_vec_regs *reg_vals;
+ int num_regs, i, err = 0;
+ u32 rx_itr, tx_itr;
+ u16 total_vecs;
+
+ total_vecs = idpf_get_reserved_vecs(vport->adapter);
+ reg_vals = kcalloc(total_vecs, sizeof(struct idpf_vec_regs),
+ GFP_KERNEL);
+ if (!reg_vals)
+ return -ENOMEM;
+
+ num_regs = idpf_get_reg_intr_vecs(vport, reg_vals);
+ if (num_regs < num_vecs) {
+ err = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ for (i = 0; i < num_vecs; i++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[i];
+ u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_intr_reg *intr = &q_vector->intr_reg;
+ u32 spacing;
+
+ intr->dyn_ctl = idpf_get_reg_addr(adapter,
+ reg_vals[vec_id].dyn_ctl_reg);
+ intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
+ intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
+ intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
+
+ spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
+ IDPF_PF_ITR_IDX_SPACING);
+ rx_itr = PF_GLINT_ITR_ADDR(VIRTCHNL2_ITR_IDX_0,
+ reg_vals[vec_id].itrn_reg,
+ spacing);
+ tx_itr = PF_GLINT_ITR_ADDR(VIRTCHNL2_ITR_IDX_1,
+ reg_vals[vec_id].itrn_reg,
+ spacing);
+ intr->rx_itr = idpf_get_reg_addr(adapter, rx_itr);
+ intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
+ }
+
+free_reg_vals:
+ kfree(reg_vals);
+
+ return err;
+}
+
+/**
+ * idpf_reset_reg_init - Initialize reset registers
+ * @adapter: Driver specific private structure
+ */
+static void idpf_reset_reg_init(struct idpf_adapter *adapter)
+{
+ adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT);
+ adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M;
+}
+
+/**
+ * idpf_trigger_reset - trigger reset
+ * @adapter: Driver specific private structure
+ * @trig_cause: Reason to trigger a reset
+ */
+static void idpf_trigger_reset(struct idpf_adapter *adapter,
+ enum idpf_flags __always_unused trig_cause)
+{
+ u32 reset_reg;
+
+ reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL));
+ writel(reset_reg | PFGEN_CTRL_PFSWR,
+ idpf_get_reg_addr(adapter, PFGEN_CTRL));
+}
+
+/**
+ * idpf_reg_ops_init - Initialize register API function pointers
+ * @adapter: Driver specific private structure
+ */
+static void idpf_reg_ops_init(struct idpf_adapter *adapter)
+{
+ adapter->dev_ops.reg_ops.ctlq_reg_init = idpf_ctlq_reg_init;
+ adapter->dev_ops.reg_ops.intr_reg_init = idpf_intr_reg_init;
+ adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init;
+ adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init;
+ adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset;
+}
+
+/**
+ * idpf_dev_ops_init - Initialize device API function pointers
+ * @adapter: Driver specific private structure
+ */
+void idpf_dev_ops_init(struct idpf_adapter *adapter)
+{
+ idpf_reg_ops_init(adapter);
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_devids.h b/drivers/net/ethernet/intel/idpf/idpf_devids.h
new file mode 100644
index 000000000000..5154a52ae61c
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_devids.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_DEVIDS_H_
+#define _IDPF_DEVIDS_H_
+
+#define IDPF_DEV_ID_PF 0x1452
+#define IDPF_DEV_ID_VF 0x145C
+
+#endif /* _IDPF_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
new file mode 100644
index 000000000000..52ea38669f85
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -0,0 +1,1369 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+
+/**
+ * idpf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule locations
+ *
+ * Returns 0 if the command is supported, -EOPNOTSUPP otherwise.
+ */
+static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 __always_unused *rule_locs)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = vport->num_rxq;
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+ default:
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the key size on success, error value on failure.
+ */
+static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+
+ if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
+ return -EOPNOTSUPP;
+
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+
+ return user_config->rss_data.rss_key_size;
+}
+
+/**
+ * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size on success, error value on failure.
+ */
+static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+
+ if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
+ return -EOPNOTSUPP;
+
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+
+ return user_config->rss_data.rss_lut_size;
+}
+
+/**
+ * idpf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function in use
+ *
+ * Reads the indirection table from the driver's cached RSS configuration.
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_rss_data *rss_data;
+ struct idpf_adapter *adapter;
+ int err = 0;
+ u16 i;
+
+ idpf_vport_ctrl_lock(netdev);
+
+ adapter = np->adapter;
+
+ if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
+ err = -EOPNOTSUPP;
+ goto unlock_mutex;
+ }
+
+ rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
+ if (np->state != __IDPF_VPORT_UP)
+ goto unlock_mutex;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (key)
+ memcpy(key, rss_data->rss_key, rss_data->rss_key_size);
+
+ if (indir) {
+ for (i = 0; i < rss_data->rss_lut_size; i++)
+ indir[i] = rss_data->rss_lut[i];
+ }
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function to use
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ */
+static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_rss_data *rss_data;
+ struct idpf_adapter *adapter;
+ struct idpf_vport *vport;
+ int err = 0;
+ u16 lut;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ adapter = vport->adapter;
+
+ if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
+ err = -EOPNOTSUPP;
+ goto unlock_mutex;
+ }
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ if (np->state != __IDPF_VPORT_UP)
+ goto unlock_mutex;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ err = -EOPNOTSUPP;
+ goto unlock_mutex;
+ }
+
+ if (key)
+ memcpy(rss_data->rss_key, key, rss_data->rss_key_size);
+
+ if (indir) {
+ for (lut = 0; lut < rss_data->rss_lut_size; lut++)
+ rss_data->rss_lut[lut] = indir[lut];
+ }
+
+ err = idpf_config_rss(vport);
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
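+
+/* For illustration, these handlers back the standard ethtool RSS commands,
+ * e.g. (device name is an example):
+ *
+ *    ethtool -x eth0          # show indirection table and hash key
+ *    ethtool -X eth0 equal 4  # spread flows over the first 4 Rx queues
+ */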
+
+/**
+ * idpf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * Report maximum of TX and RX. Report one extra channel to match our MailBox
+ * Queue.
+ */
+static void idpf_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_config *vport_config;
+ u16 num_txq, num_rxq;
+ u16 combined;
+
+ vport_config = np->adapter->vport_config[np->vport_idx];
+
+ num_txq = vport_config->user_config.num_req_tx_qs;
+ num_rxq = vport_config->user_config.num_req_rx_qs;
+
+ combined = min(num_txq, num_rxq);
+
+ /* Report maximum channels */
+ ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
+ vport_config->max_q.max_rxq);
+ ch->max_rx = vport_config->max_q.max_rxq;
+ ch->max_tx = vport_config->max_q.max_txq;
+
+ ch->max_other = IDPF_MAX_MBXQ;
+ ch->other_count = IDPF_MAX_MBXQ;
+
+ ch->combined_count = combined;
+ ch->rx_count = num_rxq - combined;
+ ch->tx_count = num_txq - combined;
+}
+
+/**
+ * idpf_set_channels: set the new channel count
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * Negotiate a new number of channels with CP. Returns 0 on success, negative
+ * on failure.
+ */
+static int idpf_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct idpf_vport_config *vport_config;
+ u16 combined, num_txq, num_rxq;
+ unsigned int num_req_tx_q;
+ unsigned int num_req_rx_q;
+ struct idpf_vport *vport;
+ struct device *dev;
+ int err = 0;
+ u16 idx;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ idx = vport->idx;
+ vport_config = vport->adapter->vport_config[idx];
+
+ num_txq = vport_config->user_config.num_req_tx_qs;
+ num_rxq = vport_config->user_config.num_req_rx_qs;
+
+ combined = min(num_txq, num_rxq);
+
+ /* these checks are for cases where the user didn't specify a
+ * particular value on the command line but we get a non-zero value
+ * anyway via get_channels(); see the do_schannels() routine in
+ * ethtool.c of the user space ethtool repository
+ */
+ if (ch->combined_count == combined)
+ ch->combined_count = 0;
+ if (ch->combined_count && ch->rx_count == num_rxq - combined)
+ ch->rx_count = 0;
+ if (ch->combined_count && ch->tx_count == num_txq - combined)
+ ch->tx_count = 0;
+
+ num_req_tx_q = ch->combined_count + ch->tx_count;
+ num_req_rx_q = ch->combined_count + ch->rx_count;
+
+ dev = &vport->adapter->pdev->dev;
+ /* It's possible to specify a number of queues that exceeds the max.
+ * Stack checks max combined_count and max [tx|rx]_count but not the
+ * max combined_count + [tx|rx]_count. These checks should catch that.
+ */
+ if (num_req_tx_q > vport_config->max_q.max_txq) {
+ dev_info(dev, "Maximum TX queues is %d\n",
+ vport_config->max_q.max_txq);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+ if (num_req_rx_q > vport_config->max_q.max_rxq) {
+ dev_info(dev, "Maximum RX queues is %d\n",
+ vport_config->max_q.max_rxq);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)
+ goto unlock_mutex;
+
+ vport_config->user_config.num_req_tx_qs = num_req_tx_q;
+ vport_config->user_config.num_req_rx_qs = num_req_rx_q;
+
+ err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
+ if (err) {
+ /* roll back queue change */
+ vport_config->user_config.num_req_tx_qs = num_txq;
+ vport_config->user_config.num_req_rx_qs = num_rxq;
+ }
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
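+
+/* For illustration, the channel counts map to the standard ethtool commands
+ * (device name is an example):
+ *
+ *    ethtool -l eth0             # query current and maximum channel counts
+ *    ethtool -L eth0 combined 8  # request 8 combined queue pairs
+ */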
+
+/**
+ * idpf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ * @kring: unused
+ * @ext_ack: unused
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ */
+static void idpf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
+ ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
+ ring->rx_pending = vport->rxq_desc_count;
+ ring->tx_pending = vport->txq_desc_count;
+
+ idpf_vport_ctrl_unlock(netdev);
+}
+
+/**
+ * idpf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ * @kring: unused
+ * @ext_ack: unused
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ */
+static int idpf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct idpf_vport_user_config_data *config_data;
+ u32 new_rx_count, new_tx_count;
+ struct idpf_vport *vport;
+ int i, err = 0;
+ u16 idx;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ idx = vport->idx;
+
+ if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
+ netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
+ ring->tx_pending,
+ IDPF_MIN_TXQ_DESC);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
+ netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
+ ring->rx_pending,
+ IDPF_MIN_RXQ_DESC);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
+ if (new_rx_count != ring->rx_pending)
+ netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
+ new_rx_count);
+
+ new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
+ if (new_tx_count != ring->tx_pending)
+ netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
+ new_tx_count);
+
+ if (new_tx_count == vport->txq_desc_count &&
+ new_rx_count == vport->rxq_desc_count)
+ goto unlock_mutex;
+
+ config_data = &vport->adapter->vport_config[idx]->user_config;
+ config_data->num_req_txq_desc = new_tx_count;
+ config_data->num_req_rxq_desc = new_rx_count;
+
+ /* Since we adjusted the RX completion queue count, the RX buffer queue
+ * descriptor count needs to be adjusted as well
+ */
+ for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
+ vport->bufq_desc_count[i] =
+ IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
+ vport->num_bufqs_per_qgrp);
+
+ err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
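+
+/* For illustration (device name and counts are examples):
+ *
+ *    ethtool -g eth0                 # query ring sizes
+ *    ethtool -G eth0 rx 1024 tx 512  # request new ring sizes
+ *
+ * Requested counts are rounded up to the required descriptor multiple, as
+ * the netdev_info messages above note.
+ */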
+
+/**
+ * struct idpf_stats - definition for an ethtool statistic
+ * @stat_string: statistic name to display in ethtool -S output
+ * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
+ * @stat_offset: offsetof() the stat from a base pointer
+ *
+ * This structure defines a statistic to be added to the ethtool stats buffer.
+ * It defines a statistic as offset from a common base pointer. Stats should
+ * be defined in constant arrays using the IDPF_STAT macro, with every element
+ * of the array using the same _type for calculating the sizeof_stat and
+ * stat_offset.
+ *
+ * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
+ * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
+ * the idpf_add_ethtool_stat() helper function.
+ *
+ * The @stat_string is interpreted as a format string, allowing formatted
+ * values to be inserted while looping over multiple structures for a given
+ * statistics array. Thus, every statistic string in an array should have the
+ * same type and number of format specifiers, to be formatted by variadic
+ * arguments to the idpf_add_stat_string() helper function.
+ */
+struct idpf_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+/* Helper macro to define an idpf_stat structure with proper size and type.
+ * Use this when defining constant statistics arrays. Note that @_type expects
+ * only a type name and is used multiple times.
+ */
+#define IDPF_STAT(_type, _name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = sizeof_field(_type, _stat), \
+ .stat_offset = offsetof(_type, _stat) \
+}
+
+/* Helper macro for defining some statistics related to queues */
+#define IDPF_QUEUE_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_queue, _name, _stat)
+
+/* Stats associated with a Tx queue */
+static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
+ IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
+ IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
+ IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
+};
+
+/* Stats associated with an Rx queue */
+static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
+ IDPF_QUEUE_STAT("pkts", q_stats.rx.packets),
+ IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes),
+ IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts),
+};
+
+#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
+#define IDPF_RX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_rx_queue_stats)
+
+#define IDPF_PORT_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_vport, _name, _stat)
+
+static const struct idpf_stats idpf_gstrings_port_stats[] = {
+ IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
+ IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
+ IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
+ IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
+ IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
+ IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
+ IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
+ IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
+ IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
+ IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
+ IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
+ IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
+ IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
+ IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
+ IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
+};
+
+#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)
+
+/**
+ * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @size: size of the stats array
+ * @type: stat type
+ * @idx: stat index
+ *
+ * Format and copy the strings described by stats into the buffer pointed at
+ * by p.
+ */
+static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
+ const unsigned int size, const char *type,
+ unsigned int idx)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ ethtool_sprintf(p, "%s_q-%u_%s",
+ type, idx, stats[i].stat_string);
+}
+
+/**
+ * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @type: stat type
+ * @idx: stat idx
+ *
+ * Format and copy the strings described by the const static stats value into
+ * the buffer pointed at by p.
+ *
+ * The parameter @stats is evaluated twice, so parameters with side effects
+ * should be avoided. Additionally, stats must be an array such that
+ * ARRAY_SIZE can be called on it.
+ */
+#define idpf_add_qstat_strings(p, stats, type, idx) \
+ __idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)
+
+/**
+ * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
+ * @p: ethtool buffer
+ * @stats: struct to copy from
+ * @size: size of stats array to copy from
+ */
+static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ ethtool_sprintf(p, "%s", stats[i].stat_string);
+}
+
+/**
+ * idpf_get_stat_strings - Get stat strings
+ * @netdev: network interface device structure
+ * @data: buffer for string data
+ *
+ * Builds the statistics string table
+ */
+static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_config *vport_config;
+ unsigned int i;
+
+ idpf_add_stat_strings(&data, idpf_gstrings_port_stats,
+ IDPF_PORT_STATS_LEN);
+
+ vport_config = np->adapter->vport_config[np->vport_idx];
+ /* It's critical that we always report a constant number of strings and
+ * that the strings are reported in the same order regardless of how
+ * many queues are actually in use.
+ */
+ for (i = 0; i < vport_config->max_q.max_txq; i++)
+ idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
+ "tx", i);
+
+ for (i = 0; i < vport_config->max_q.max_rxq; i++)
+ idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
+ "rx", i);
+
+ page_pool_ethtool_stats_get_strings(data);
+}
+
+/**
+ * idpf_get_strings - Get string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ * @data: buffer for string data
+ *
+ * Builds string tables for various string sets
+ */
+static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ idpf_get_stat_strings(netdev, data);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * idpf_get_sset_count - Get length of string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ *
+ * Reports size of various string tables.
+ */
+static int idpf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_config *vport_config;
+ u16 max_txq, max_rxq;
+ unsigned int size;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ vport_config = np->adapter->vport_config[np->vport_idx];
+ /* This size reported back here *must* be constant throughout the
+ * lifecycle of the netdevice, i.e. we must report the maximum length
+ * even for queues that don't technically exist. This is due to the
+ * fact that this userspace API uses three separate ioctl calls to get
+ * stats data but has no way to communicate back to userspace when that
+ * size has changed, which can typically happen as a result of changing
+ * number of queues. If the number/order of stats change in the middle
+ * of this call chain it will lead to userspace crashing/accessing bad
+ * data through buffer under/overflow.
+ */
+ max_txq = vport_config->max_q.max_txq;
+ max_rxq = vport_config->max_q.max_rxq;
+
+ size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
+ (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
+ size += page_pool_ethtool_stats_get_count();
+
+ return size;
+}
+
+/**
+ * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
+ * @data: location to store the stat value
+ * @pstat: old stat pointer to copy from
+ * @stat: the stat definition
+ *
+ * Copies the stat data defined by the pointer and stat structure pair into
+ * the memory supplied as data. If the pointer is null, data will be zero'd.
+ */
+static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
+ const struct idpf_stats *stat)
+{
+ char *p;
+
+ if (!pstat) {
+ /* Ensure that the ethtool data buffer is zero'd for any stats
+ * which don't have a valid pointer.
+ */
+ *data = 0;
+ return;
+ }
+
+ p = (char *)pstat + stat->stat_offset;
+ switch (stat->sizeof_stat) {
+ case sizeof(u64):
+ *data = *((u64 *)p);
+ break;
+ case sizeof(u32):
+ *data = *((u32 *)p);
+ break;
+ case sizeof(u16):
+ *data = *((u16 *)p);
+ break;
+ case sizeof(u8):
+ *data = *((u8 *)p);
+ break;
+ default:
+ WARN_ONCE(1, "unexpected stat size for %s",
+ stat->stat_string);
+ *data = 0;
+ }
+}
+
+/**
+ * idpf_add_queue_stats - copy queue statistics into supplied buffer
+ * @data: ethtool stats buffer
+ * @q: the queue to copy
+ *
+ * Queue statistics must be copied while protected by u64_stats_fetch_begin,
+ * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
+ * are defined in idpf_gstrings_queue_stats. If the queue pointer is null,
+ * zero out the queue stat values and update the data pointer. Otherwise
+ * safely copy the stats from the queue into the supplied buffer and update
+ * the data pointer when finished.
+ *
+ * This function expects to be called while under rcu_read_lock().
+ */
+static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
+{
+ const struct idpf_stats *stats;
+ unsigned int start;
+ unsigned int size;
+ unsigned int i;
+
+ if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ size = IDPF_RX_QUEUE_STATS_LEN;
+ stats = idpf_gstrings_rx_queue_stats;
+ } else {
+ size = IDPF_TX_QUEUE_STATS_LEN;
+ stats = idpf_gstrings_tx_queue_stats;
+ }
+
+ /* To avoid invalid statistics values, ensure that we keep retrying
+ * the copy until we get a consistent value according to
+ * u64_stats_fetch_retry.
+ */
+ do {
+ start = u64_stats_fetch_begin(&q->stats_sync);
+ for (i = 0; i < size; i++)
+ idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
+ } while (u64_stats_fetch_retry(&q->stats_sync, start));
+
+ /* Once we successfully copy the stats in, update the data pointer */
+ *data += size;
+}
+
+/**
+ * idpf_add_empty_queue_stats - Add stats for a non-existent queue
+ * @data: pointer to data buffer
+ * @qtype: type of data queue
+ *
+ * We must report a constant length of stats back to userspace regardless of
+ * how many queues are actually in use because stats collection happens over
+ * three separate ioctls and there's no way to notify userspace the size
+ * changed between those calls. This adds zeroed stats to the data buffer
+ * since we don't have a real queue to refer to for this stats slot.
+ */
+static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
+{
+ unsigned int i;
+ int stats_len;
+
+ if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
+ stats_len = IDPF_RX_QUEUE_STATS_LEN;
+ else
+ stats_len = IDPF_TX_QUEUE_STATS_LEN;
+
+ for (i = 0; i < stats_len; i++)
+ (*data)[i] = 0;
+ *data += stats_len;
+}
+
+/**
+ * idpf_add_port_stats - Copy port stats into ethtool buffer
+ * @vport: virtual port struct
+ * @data: ethtool buffer to copy into
+ */
+static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
+{
+ unsigned int size = IDPF_PORT_STATS_LEN;
+ unsigned int start;
+ unsigned int i;
+
+ do {
+ start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
+ for (i = 0; i < size; i++)
+ idpf_add_one_ethtool_stat(&(*data)[i], vport,
+ &idpf_gstrings_port_stats[i]);
+ } while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));
+
+ *data += size;
+}
+
+/**
+ * idpf_collect_queue_stats - accumulate various per queue stats
+ * into port level stats
+ * @vport: pointer to vport struct
+ */
+static void idpf_collect_queue_stats(struct idpf_vport *vport)
+{
+ struct idpf_port_stats *pstats = &vport->port_stats;
+ int i, j;
+
+ /* zero out port stats since they're actually tracked in per
+ * queue stats; this is only for reporting
+ */
+ u64_stats_update_begin(&pstats->stats_sync);
+ u64_stats_set(&pstats->rx_hw_csum_err, 0);
+ u64_stats_set(&pstats->rx_hsplit, 0);
+ u64_stats_set(&pstats->rx_hsplit_hbo, 0);
+ u64_stats_set(&pstats->rx_bad_descs, 0);
+ u64_stats_set(&pstats->tx_linearize, 0);
+ u64_stats_set(&pstats->tx_busy, 0);
+ u64_stats_set(&pstats->tx_drops, 0);
+ u64_stats_set(&pstats->tx_dma_map_errs, 0);
+ u64_stats_update_end(&pstats->stats_sync);
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ u16 num_rxq;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rxq_grp->splitq.num_rxq_sets;
+ else
+ num_rxq = rxq_grp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
+ struct idpf_rx_queue_stats *stats;
+ struct idpf_queue *rxq;
+ unsigned int start;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
+ else
+ rxq = rxq_grp->singleq.rxqs[j];
+
+ if (!rxq)
+ continue;
+
+ do {
+ start = u64_stats_fetch_begin(&rxq->stats_sync);
+
+ stats = &rxq->q_stats.rx;
+ hw_csum_err = u64_stats_read(&stats->hw_csum_err);
+ hsplit = u64_stats_read(&stats->hsplit_pkts);
+ hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
+ bad_descs = u64_stats_read(&stats->bad_descs);
+ } while (u64_stats_fetch_retry(&rxq->stats_sync, start));
+
+ u64_stats_update_begin(&pstats->stats_sync);
+ u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
+ u64_stats_add(&pstats->rx_hsplit, hsplit);
+ u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
+ u64_stats_add(&pstats->rx_bad_descs, bad_descs);
+ u64_stats_update_end(&pstats->stats_sync);
+ }
+ }
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ for (j = 0; j < txq_grp->num_txq; j++) {
+ u64 linearize, qbusy, skb_drops, dma_map_errs;
+ struct idpf_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue_stats *stats;
+ unsigned int start;
+
+ if (!txq)
+ continue;
+
+ do {
+ start = u64_stats_fetch_begin(&txq->stats_sync);
+
+ stats = &txq->q_stats.tx;
+ linearize = u64_stats_read(&stats->linearize);
+ qbusy = u64_stats_read(&stats->q_busy);
+ skb_drops = u64_stats_read(&stats->skb_drops);
+ dma_map_errs = u64_stats_read(&stats->dma_map_errs);
+ } while (u64_stats_fetch_retry(&txq->stats_sync, start));
+
+ u64_stats_update_begin(&pstats->stats_sync);
+ u64_stats_add(&pstats->tx_linearize, linearize);
+ u64_stats_add(&pstats->tx_busy, qbusy);
+ u64_stats_add(&pstats->tx_drops, skb_drops);
+ u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
+ u64_stats_update_end(&pstats->stats_sync);
+ }
+ }
+}
+
+/**
+ * idpf_get_ethtool_stats - report device statistics
+ * @netdev: network interface device structure
+ * @stats: ethtool statistics structure
+ * @data: pointer to data buffer
+ *
+ * All statistics are added to the data buffer as an array of u64.
+ */
+static void idpf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_config *vport_config;
+ struct page_pool_stats pp_stats = { };
+ struct idpf_vport *vport;
+ unsigned int total = 0;
+ unsigned int i, j;
+ bool is_splitq;
+ u16 qtype;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (np->state != __IDPF_VPORT_UP) {
+ idpf_vport_ctrl_unlock(netdev);
+
+ return;
+ }
+
+ rcu_read_lock();
+
+ idpf_collect_queue_stats(vport);
+ idpf_add_port_stats(vport, &data);
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ qtype = VIRTCHNL2_QUEUE_TYPE_TX;
+
+ for (j = 0; j < txq_grp->num_txq; j++, total++) {
+ struct idpf_queue *txq = txq_grp->txqs[j];
+
+ if (!txq)
+ idpf_add_empty_queue_stats(&data, qtype);
+ else
+ idpf_add_queue_stats(&data, txq);
+ }
+ }
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+ /* It is critical we provide a constant number of stats back to
+ * userspace regardless of how many queues are actually in use because
+ * there is no way to inform userspace the size has changed between
+ * ioctl calls. This will fill in any missing stats with zero.
+ */
+ for (; total < vport_config->max_q.max_txq; total++)
+ idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
+ total = 0;
+
+ is_splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ u16 num_rxq;
+
+ qtype = VIRTCHNL2_QUEUE_TYPE_RX;
+
+ if (is_splitq)
+ num_rxq = rxq_grp->splitq.num_rxq_sets;
+ else
+ num_rxq = rxq_grp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++, total++) {
+ struct idpf_queue *rxq;
+
+ if (is_splitq)
+ rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
+ else
+ rxq = rxq_grp->singleq.rxqs[j];
+ if (!rxq)
+ idpf_add_empty_queue_stats(&data, qtype);
+ else
+ idpf_add_queue_stats(&data, rxq);
+
+ /* In splitq mode, don't get page pool stats here since
+ * the pools are attached to the buffer queues
+ */
+ if (is_splitq)
+ continue;
+
+ if (rxq)
+ page_pool_get_stats(rxq->pp, &pp_stats);
+ }
+ }
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_queue *rxbufq =
+ &vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
+
+ page_pool_get_stats(rxbufq->pp, &pp_stats);
+ }
+ }
+
+ for (; total < vport_config->max_q.max_rxq; total++)
+ idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
+
+ page_pool_ethtool_stats_get(data, &pp_stats);
+
+ rcu_read_unlock();
+
+ idpf_vport_ctrl_unlock(netdev);
+}
+
+/**
+ * idpf_find_rxq - find rxq from q index
+ * @vport: virtual port associated with the queue
+ * @q_num: q index used to find queue
+ *
+ * returns pointer to rx queue
+ */
+static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num)
+{
+ int q_grp, q_idx;
+
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ return vport->rxq_grps->singleq.rxqs[q_num];
+
+ q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
+ q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
+
+ return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;
+}
+
+/**
+ * idpf_find_txq - find txq from q index
+ * @vport: virtual port associated with the queue
+ * @q_num: q index used to find queue
+ *
+ * returns pointer to tx queue
+ */
+static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num)
+{
+ int q_grp;
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ return vport->txqs[q_num];
+
+ q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
+
+ return vport->txq_grps[q_grp].complq;
+}
+
+/**
+ * __idpf_get_q_coalesce - get ITR values for specific queue
+ * @ec: ethtool structure to fill with driver's coalesce settings
+ * @q: Rx or Tx queue
+ */
+static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
+ struct idpf_queue *q)
+{
+ if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ ec->use_adaptive_rx_coalesce =
+ IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
+ ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
+ } else {
+ ec->use_adaptive_tx_coalesce =
+ IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
+ ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
+ }
+}
+
+/**
+ * idpf_get_q_coalesce - get ITR values for specific queue
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: coalesce settings to program the device with
+ * @q_num: queue number/index to retrieve ITR (coalesce) settings for
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_get_q_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ u32 q_num)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ int err = 0;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (np->state != __IDPF_VPORT_UP)
+ goto unlock_mutex;
+
+ if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ if (q_num < vport->num_rxq)
+ __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));
+
+ if (q_num < vport->num_txq)
+ __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_get_coalesce - get ITR values as requested by user
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: coalesce settings to be filled
+ * @kec: unused
+ * @extack: unused
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *extack)
+{
+ /* Return coalesce based on queue number zero */
+ return idpf_get_q_coalesce(netdev, ec, 0);
+}
+
+/**
+ * idpf_get_per_q_coalesce - get ITR values as requested by user
+ * @netdev: pointer to the netdev associated with this query
+ * @q_num: queue for which the ITR values have to be retrieved
+ * @ec: coalesce settings to be filled
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
+{
+ return idpf_get_q_coalesce(netdev, ec, q_num);
+}
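+
+/* For illustration, these map to the standard ethtool coalescing commands
+ * (device name and values are examples):
+ *
+ *    ethtool -c eth0
+ *    ethtool -C eth0 adaptive-rx off rx-usecs 50
+ *    ethtool --per-queue eth0 queue_mask 0x1 --show-coalesce
+ */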
+
+/**
+ * __idpf_set_q_coalesce - set ITR values for specific queue
+ * @ec: ethtool structure from user to update ITR settings
+ * @q: queue for which ITR values have to be set
+ * @is_rxq: is queue type rx
+ *
+ * Returns 0 on success, negative otherwise.
+ */
+static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
+ struct idpf_queue *q, bool is_rxq)
+{
+ u32 use_adaptive_coalesce, coalesce_usecs;
+ struct idpf_q_vector *qv = q->q_vector;
+ bool is_dim_ena = false;
+ u16 itr_val;
+
+ if (is_rxq) {
+ is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
+ use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
+ coalesce_usecs = ec->rx_coalesce_usecs;
+ itr_val = qv->rx_itr_value;
+ } else {
+ is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
+ use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
+ coalesce_usecs = ec->tx_coalesce_usecs;
+ itr_val = qv->tx_itr_value;
+ }
+ if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
+ netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
+
+ return -EINVAL;
+ }
+
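+ /* Adaptive coalescing is already on and was requested again; there
+ * is nothing to update.
+ */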
+ if (is_dim_ena && use_adaptive_coalesce)
+ return 0;
+
+ if (coalesce_usecs > IDPF_ITR_MAX) {
+ netdev_err(q->vport->netdev,
+ "Invalid value, %d-usecs range is 0-%d\n",
+ coalesce_usecs, IDPF_ITR_MAX);
+
+ return -EINVAL;
+ }
+
+ if (coalesce_usecs % 2) {
+ coalesce_usecs--;
+ netdev_info(q->vport->netdev,
+ "HW only supports even ITR values, ITR rounded to %d\n",
+ coalesce_usecs);
+ }
+
+ if (is_rxq) {
+ qv->rx_itr_value = coalesce_usecs;
+ if (use_adaptive_coalesce) {
+ qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ } else {
+ qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
+ false);
+ }
+ } else {
+ qv->tx_itr_value = coalesce_usecs;
+ if (use_adaptive_coalesce) {
+ qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ } else {
+ qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
+ }
+ }
+
+ /* The switch between static and dynamic ITR will be taken care of
+ * when the interrupt fires.
+ */
+ return 0;
+}
+
+/**
+ * idpf_set_q_coalesce - set ITR values for specific queue
+ * @vport: vport associated with the queue that needs updating
+ * @ec: coalesce settings to program the device with
+ * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
+ * @is_rxq: is queue type rx
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_set_q_coalesce(struct idpf_vport *vport,
+ struct ethtool_coalesce *ec,
+ int q_num, bool is_rxq)
+{
+ struct idpf_queue *q;
+
+ q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num);
+
+ if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * idpf_set_coalesce - set ITR values as requested by user
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: coalesce settings to program the device with
+ * @kec: unused
+ * @extack: unused
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *extack)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ int i, err = 0;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (np->state != __IDPF_VPORT_UP)
+ goto unlock_mutex;
+
+ for (i = 0; i < vport->num_txq; i++) {
+ err = idpf_set_q_coalesce(vport, ec, i, false);
+ if (err)
+ goto unlock_mutex;
+ }
+
+ for (i = 0; i < vport->num_rxq; i++) {
+ err = idpf_set_q_coalesce(vport, ec, i, true);
+ if (err)
+ goto unlock_mutex;
+ }
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_set_per_q_coalesce - set ITR values as requested by user
+ * @netdev: pointer to the netdev associated with this query
+ * @q_num: queue for which the ITR values have to be set
+ * @ec: coalesce settings to program the device with
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
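+ /* Apply the settings to both the Tx and the Rx queue with this index */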
+ err = idpf_set_q_coalesce(vport, ec, q_num, false);
+ if (err) {
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+ }
+
+ err = idpf_set_q_coalesce(vport, ec, q_num, true);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_get_msglevel - Get debug message level
+ * @netdev: network interface device structure
+ *
+ * Returns current debug message level.
+ */
+static u32 idpf_get_msglevel(struct net_device *netdev)
+{
+ struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
+
+ return adapter->msg_enable;
+}
+
+/**
+ * idpf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Set current debug message level. Higher values cause the driver to
+ * be noisier.
+ */
+static void idpf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
+
+ adapter->msg_enable = data;
+}
+
+/**
+ * idpf_get_link_ksettings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @cmd: ethtool command
+ *
+ * Reports speed/duplex settings.
+ **/
+static int idpf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_NONE;
+ if (vport->link_up) {
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = vport->link_speed_mbps;
+ } else {
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ }
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+}
+
+static const struct ethtool_ops idpf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .get_msglevel = idpf_get_msglevel,
+ .set_msglevel = idpf_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = idpf_get_coalesce,
+ .set_coalesce = idpf_set_coalesce,
+ .get_per_queue_coalesce = idpf_get_per_q_coalesce,
+ .set_per_queue_coalesce = idpf_set_per_q_coalesce,
+ .get_ethtool_stats = idpf_get_ethtool_stats,
+ .get_strings = idpf_get_strings,
+ .get_sset_count = idpf_get_sset_count,
+ .get_channels = idpf_get_channels,
+ .get_rxnfc = idpf_get_rxnfc,
+ .get_rxfh_key_size = idpf_get_rxfh_key_size,
+ .get_rxfh_indir_size = idpf_get_rxfh_indir_size,
+ .get_rxfh = idpf_get_rxfh,
+ .set_rxfh = idpf_set_rxfh,
+ .set_channels = idpf_set_channels,
+ .get_ringparam = idpf_get_ringparam,
+ .set_ringparam = idpf_set_ringparam,
+ .get_link_ksettings = idpf_get_link_ksettings,
+};
+
+/**
+ * idpf_set_ethtool_ops - Initialize ethtool ops struct
+ * @netdev: network interface device structure
+ *
+ * Sets ethtool ops struct in our netdev so that ethtool can call
+ * our functions.
+ */
+void idpf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &idpf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
new file mode 100644
index 000000000000..24edb8a6ec2e
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_LAN_PF_REGS_H_
+#define _IDPF_LAN_PF_REGS_H_
+
+/* Receive queues */
+#define PF_QRX_BASE 0x00000000
+#define PF_QRX_TAIL(_QRX) (PF_QRX_BASE + (((_QRX) * 0x1000)))
+#define PF_QRX_BUFFQ_BASE 0x03000000
+#define PF_QRX_BUFFQ_TAIL(_QRX) (PF_QRX_BUFFQ_BASE + (((_QRX) * 0x1000)))
+
+/* Transmit queues */
+#define PF_QTX_BASE 0x05000000
+#define PF_QTX_COMM_DBELL(_DBQM) (PF_QTX_BASE + ((_DBQM) * 0x1000))
+
+/* Control(PF Mailbox) Queue */
+#define PF_FW_BASE 0x08400000
+
+#define PF_FW_ARQBAL (PF_FW_BASE)
+#define PF_FW_ARQBAH (PF_FW_BASE + 0x4)
+#define PF_FW_ARQLEN (PF_FW_BASE + 0x8)
+#define PF_FW_ARQLEN_ARQLEN_S 0
+#define PF_FW_ARQLEN_ARQLEN_M GENMASK(12, 0)
+#define PF_FW_ARQLEN_ARQVFE_S 28
+#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S)
+#define PF_FW_ARQLEN_ARQOVFL_S 29
+#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S)
+#define PF_FW_ARQLEN_ARQCRIT_S 30
+#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S)
+#define PF_FW_ARQLEN_ARQENABLE_S 31
+#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S)
+#define PF_FW_ARQH (PF_FW_BASE + 0xC)
+#define PF_FW_ARQH_ARQH_S 0
+#define PF_FW_ARQH_ARQH_M GENMASK(12, 0)
+#define PF_FW_ARQT (PF_FW_BASE + 0x10)
+
+#define PF_FW_ATQBAL (PF_FW_BASE + 0x14)
+#define PF_FW_ATQBAH (PF_FW_BASE + 0x18)
+#define PF_FW_ATQLEN (PF_FW_BASE + 0x1C)
+#define PF_FW_ATQLEN_ATQLEN_S 0
+#define PF_FW_ATQLEN_ATQLEN_M GENMASK(9, 0)
+#define PF_FW_ATQLEN_ATQVFE_S 28
+#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S)
+#define PF_FW_ATQLEN_ATQOVFL_S 29
+#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S)
+#define PF_FW_ATQLEN_ATQCRIT_S 30
+#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S)
+#define PF_FW_ATQLEN_ATQENABLE_S 31
+#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
+#define PF_FW_ATQH (PF_FW_BASE + 0x20)
+#define PF_FW_ATQH_ATQH_S 0
+#define PF_FW_ATQH_ATQH_M GENMASK(9, 0)
+#define PF_FW_ATQT (PF_FW_BASE + 0x24)
+
+/* Interrupts */
+#define PF_GLINT_BASE 0x08900000
+#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000))
+#define PF_GLINT_DYN_CTL_INTENA_S 0
+#define PF_GLINT_DYN_CTL_INTENA_M BIT(PF_GLINT_DYN_CTL_INTENA_S)
+#define PF_GLINT_DYN_CTL_CLEARPBA_S 1
+#define PF_GLINT_DYN_CTL_CLEARPBA_M BIT(PF_GLINT_DYN_CTL_CLEARPBA_S)
+#define PF_GLINT_DYN_CTL_SWINT_TRIG_S 2
+#define PF_GLINT_DYN_CTL_SWINT_TRIG_M BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S)
+#define PF_GLINT_DYN_CTL_ITR_INDX_S 3
+#define PF_GLINT_DYN_CTL_ITR_INDX_M GENMASK(4, 3)
+#define PF_GLINT_DYN_CTL_INTERVAL_S 5
+#define PF_GLINT_DYN_CTL_INTERVAL_M BIT(PF_GLINT_DYN_CTL_INTERVAL_S)
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S)
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_S 25
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_S)
+#define PF_GLINT_DYN_CTL_WB_ON_ITR_S 30
+#define PF_GLINT_DYN_CTL_WB_ON_ITR_M BIT(PF_GLINT_DYN_CTL_WB_ON_ITR_S)
+#define PF_GLINT_DYN_CTL_INTENA_MSK_S 31
+#define PF_GLINT_DYN_CTL_INTENA_MSK_M BIT(PF_GLINT_DYN_CTL_INTENA_MSK_S)
+/* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is
+ * spacing b/w itrn registers of the same vector.
+ */
+#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \
+ ((_reg_start) + ((_ITR) * (_itrn_indx_spacing)))
+/* For PF, itrn_indx_spacing is 4 and itrn_reg_spacing is 0x1000 */
+#define PF_GLINT_ITR(_ITR, _INT) \
+ (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000))
+#define PF_GLINT_ITR_MAX_INDEX 2
+#define PF_GLINT_ITR_INTERVAL_S 0
+#define PF_GLINT_ITR_INTERVAL_M GENMASK(11, 0)
+
+/* Generic registers */
+#define PF_INT_DIR_OICR_ENA 0x08406000
+#define PF_INT_DIR_OICR_ENA_S 0
+#define PF_INT_DIR_OICR_ENA_M GENMASK(31, 0)
+#define PF_INT_DIR_OICR 0x08406004
+#define PF_INT_DIR_OICR_TSYN_EVNT 0
+#define PF_INT_DIR_OICR_PHY_TS_0 BIT(1)
+#define PF_INT_DIR_OICR_PHY_TS_1 BIT(2)
+#define PF_INT_DIR_OICR_CAUSE 0x08406008
+#define PF_INT_DIR_OICR_CAUSE_CAUSE_S 0
+#define PF_INT_DIR_OICR_CAUSE_CAUSE_M GENMASK(31, 0)
+#define PF_INT_PBA_CLEAR 0x0840600C
+
+#define PF_FUNC_RID 0x08406010
+#define PF_FUNC_RID_FUNCTION_NUMBER_S 0
+#define PF_FUNC_RID_FUNCTION_NUMBER_M GENMASK(2, 0)
+#define PF_FUNC_RID_DEVICE_NUMBER_S 3
+#define PF_FUNC_RID_DEVICE_NUMBER_M GENMASK(7, 3)
+#define PF_FUNC_RID_BUS_NUMBER_S 8
+#define PF_FUNC_RID_BUS_NUMBER_M GENMASK(15, 8)
+
+/* Reset registers */
+#define PFGEN_RTRIG 0x08407000
+#define PFGEN_RTRIG_CORER_S 0
+#define PFGEN_RTRIG_CORER_M BIT(0)
+#define PFGEN_RTRIG_LINKR_S 1
+#define PFGEN_RTRIG_LINKR_M BIT(1)
+#define PFGEN_RTRIG_IMCR_S 2
+#define PFGEN_RTRIG_IMCR_M BIT(2)
+#define PFGEN_RSTAT 0x08407008 /* PFR Status */
+#define PFGEN_RSTAT_PFR_STATE_S 0
+#define PFGEN_RSTAT_PFR_STATE_M GENMASK(1, 0)
+#define PFGEN_CTRL 0x0840700C
+#define PFGEN_CTRL_PFSWR BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
new file mode 100644
index 000000000000..a5752dcab888
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_LAN_TXRX_H_
+#define _IDPF_LAN_TXRX_H_
+
+enum idpf_rss_hash {
+ IDPF_HASH_INVALID = 0,
+ /* Values 1 - 28 are reserved for future use */
+ IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29,
+ IDPF_HASH_NONF_MULTICAST_IPV4_UDP,
+ IDPF_HASH_NONF_IPV4_UDP,
+ IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK,
+ IDPF_HASH_NONF_IPV4_TCP,
+ IDPF_HASH_NONF_IPV4_SCTP,
+ IDPF_HASH_NONF_IPV4_OTHER,
+ IDPF_HASH_FRAG_IPV4,
+ /* Values 37-38 are reserved */
+ IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39,
+ IDPF_HASH_NONF_MULTICAST_IPV6_UDP,
+ IDPF_HASH_NONF_IPV6_UDP,
+ IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK,
+ IDPF_HASH_NONF_IPV6_TCP,
+ IDPF_HASH_NONF_IPV6_SCTP,
+ IDPF_HASH_NONF_IPV6_OTHER,
+ IDPF_HASH_FRAG_IPV6,
+ IDPF_HASH_NONF_RSVD47,
+ IDPF_HASH_NONF_FCOE_OX,
+ IDPF_HASH_NONF_FCOE_RX,
+ IDPF_HASH_NONF_FCOE_OTHER,
+ /* Values 51-62 are reserved */
+ IDPF_HASH_L2_PAYLOAD = 63,
+
+ IDPF_HASH_MAX
+};
+
+/* Supported RSS offloads */
+#define IDPF_DEFAULT_RSS_HASH \
+ (BIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \
+ BIT_ULL(IDPF_HASH_FRAG_IPV4) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV6_UDP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV6_TCP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV6_SCTP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV6_OTHER) | \
+ BIT_ULL(IDPF_HASH_FRAG_IPV6) | \
+ BIT_ULL(IDPF_HASH_L2_PAYLOAD))
+
+#define IDPF_DEFAULT_RSS_HASH_EXPANDED (IDPF_DEFAULT_RSS_HASH | \
+ BIT_ULL(IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV6_UDP))
+
+/* For idpf_splitq_base_tx_compl_desc */
+#define IDPF_TXD_COMPLQ_GEN_S 15
+#define IDPF_TXD_COMPLQ_GEN_M BIT_ULL(IDPF_TXD_COMPLQ_GEN_S)
+#define IDPF_TXD_COMPLQ_COMPL_TYPE_S 11
+#define IDPF_TXD_COMPLQ_COMPL_TYPE_M GENMASK_ULL(13, 11)
+#define IDPF_TXD_COMPLQ_QID_S 0
+#define IDPF_TXD_COMPLQ_QID_M GENMASK_ULL(9, 0)
+
+/* For base mode TX descriptors */
+
+#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S 23
+#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S)
+#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_S 19
+#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_M \
+ (0xFULL << IDPF_TXD_CTX_QW0_TUNN_DECTTL_S)
+#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_S 12
+#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_M \
+ (0x7FULL << IDPF_TXD_CTX_QW0_TUNN_NATLEN_S)
+#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S 11
+#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M \
+ BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S)
+#define IDPF_TXD_CTX_EIP_NOINC_IPID_CONST \
+ IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M
+#define IDPF_TXD_CTX_QW0_TUNN_NATT_S 9
+#define IDPF_TXD_CTX_QW0_TUNN_NATT_M (0x3ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S)
+#define IDPF_TXD_CTX_UDP_TUNNELING BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_NATT_S)
+#define IDPF_TXD_CTX_GRE_TUNNELING (0x2ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S)
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S 2
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M \
+ (0x3FULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S)
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S 0
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_M \
+ (0x3ULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S)
+
+#define IDPF_TXD_CTX_QW1_MSS_S 50
+#define IDPF_TXD_CTX_QW1_MSS_M GENMASK_ULL(63, 50)
+#define IDPF_TXD_CTX_QW1_TSO_LEN_S 30
+#define IDPF_TXD_CTX_QW1_TSO_LEN_M GENMASK_ULL(47, 30)
+#define IDPF_TXD_CTX_QW1_CMD_S 4
+#define IDPF_TXD_CTX_QW1_CMD_M GENMASK_ULL(15, 4)
+#define IDPF_TXD_CTX_QW1_DTYPE_S 0
+#define IDPF_TXD_CTX_QW1_DTYPE_M GENMASK_ULL(3, 0)
+#define IDPF_TXD_QW1_L2TAG1_S 48
+#define IDPF_TXD_QW1_L2TAG1_M GENMASK_ULL(63, 48)
+#define IDPF_TXD_QW1_TX_BUF_SZ_S 34
+#define IDPF_TXD_QW1_TX_BUF_SZ_M GENMASK_ULL(47, 34)
+#define IDPF_TXD_QW1_OFFSET_S 16
+#define IDPF_TXD_QW1_OFFSET_M GENMASK_ULL(33, 16)
+#define IDPF_TXD_QW1_CMD_S 4
+#define IDPF_TXD_QW1_CMD_M GENMASK_ULL(15, 4)
+#define IDPF_TXD_QW1_DTYPE_S 0
+#define IDPF_TXD_QW1_DTYPE_M GENMASK_ULL(3, 0)
+
+/* TX Completion Descriptor Completion Types */
+#define IDPF_TXD_COMPLT_ITR_FLUSH 0
+/* Descriptor completion type 1 is reserved */
+#define IDPF_TXD_COMPLT_RS 2
+/* Descriptor completion type 3 is reserved */
+#define IDPF_TXD_COMPLT_RE 4
+#define IDPF_TXD_COMPLT_SW_MARKER 5
+
+enum idpf_tx_desc_dtype_value {
+ IDPF_TX_DESC_DTYPE_DATA = 0,
+ IDPF_TX_DESC_DTYPE_CTX = 1,
+ /* DTYPE 2 is reserved
+ * DTYPE 3 is free for future use
+ * DTYPE 4 is reserved
+ */
+ IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX = 5,
+ /* DTYPE 6 is reserved */
+ IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 = 7,
+ /* DTYPE 8, 9 are free for future use
+ * DTYPE 10 is reserved
+ * DTYPE 11 is free for future use
+ */
+ IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE = 12,
+ /* DTYPE 13, 14 are free for future use */
+
+ /* DESC_DONE - HW has completed write-back of descriptor */
+ IDPF_TX_DESC_DTYPE_DESC_DONE = 15,
+};
+
+enum idpf_tx_ctx_desc_cmd_bits {
+ IDPF_TX_CTX_DESC_TSO = 0x01,
+ IDPF_TX_CTX_DESC_TSYN = 0x02,
+ IDPF_TX_CTX_DESC_IL2TAG2 = 0x04,
+ IDPF_TX_CTX_DESC_RSVD = 0x08,
+ IDPF_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ IDPF_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ IDPF_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ IDPF_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ IDPF_TX_CTX_DESC_FILT_AU_EN = 0x40,
+ IDPF_TX_CTX_DESC_FILT_AU_EVICT = 0x80,
+ IDPF_TX_CTX_DESC_RSVD1 = 0xF00
+};
+
+enum idpf_tx_desc_len_fields {
+ /* Note: These are predefined bit offsets */
+ IDPF_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */
+ IDPF_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */
+ IDPF_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */
+};
+
+enum idpf_tx_base_desc_cmd_bits {
+ IDPF_TX_DESC_CMD_EOP = BIT(0),
+ IDPF_TX_DESC_CMD_RS = BIT(1),
+ /* only on VFs else RSVD */
+ IDPF_TX_DESC_CMD_ICRC = BIT(2),
+ IDPF_TX_DESC_CMD_IL2TAG1 = BIT(3),
+ IDPF_TX_DESC_CMD_RSVD1 = BIT(4),
+ IDPF_TX_DESC_CMD_IIPT_IPV6 = BIT(5),
+ IDPF_TX_DESC_CMD_IIPT_IPV4 = BIT(6),
+ IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM = GENMASK(6, 5),
+ IDPF_TX_DESC_CMD_RSVD2 = BIT(7),
+ IDPF_TX_DESC_CMD_L4T_EOFT_TCP = BIT(8),
+ IDPF_TX_DESC_CMD_L4T_EOFT_SCTP = BIT(9),
+ IDPF_TX_DESC_CMD_L4T_EOFT_UDP = GENMASK(9, 8),
+ IDPF_TX_DESC_CMD_RSVD3 = BIT(10),
+ IDPF_TX_DESC_CMD_RSVD4 = BIT(11),
+};
+
+/* Transmit descriptors */
+/* splitq tx buf, singleq tx buf and singleq compl desc */
+struct idpf_base_tx_desc {
+ __le64 buf_addr; /* Address of descriptor's data buf */
+ __le64 qw1; /* type_cmd_offset_bsz_l2tag1 */
+}; /* read used with buffer queues */
+
+struct idpf_splitq_tx_compl_desc {
+ /* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */
+ __le16 qid_comptype_gen;
+ union {
+ __le16 q_head; /* Queue head */
+ __le16 compl_tag; /* Completion tag */
+ } q_head_compl_tag;
+ u8 ts[3];
+ u8 rsvd; /* Reserved */
+}; /* writeback used with completion queues */
+
+/* Context descriptors */
+struct idpf_base_tx_ctx_desc {
+ struct {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd1;
+ } qw0;
+ __le64 qw1; /* type_cmd_tlen_mss/rt_hint */
+};
+
+/* Common cmd field defines for all desc except Flex Flow Scheduler (0x0C) */
+enum idpf_tx_flex_desc_cmd_bits {
+ IDPF_TX_FLEX_DESC_CMD_EOP = BIT(0),
+ IDPF_TX_FLEX_DESC_CMD_RS = BIT(1),
+ IDPF_TX_FLEX_DESC_CMD_RE = BIT(2),
+ IDPF_TX_FLEX_DESC_CMD_IL2TAG1 = BIT(3),
+ IDPF_TX_FLEX_DESC_CMD_DUMMY = BIT(4),
+ IDPF_TX_FLEX_DESC_CMD_CS_EN = BIT(5),
+ IDPF_TX_FLEX_DESC_CMD_FILT_AU_EN = BIT(6),
+ IDPF_TX_FLEX_DESC_CMD_FILT_AU_EVICT = BIT(7),
+};
+
+struct idpf_flex_tx_desc {
+ __le64 buf_addr; /* Packet buffer address */
+ struct {
+#define IDPF_FLEX_TXD_QW1_DTYPE_S 0
+#define IDPF_FLEX_TXD_QW1_DTYPE_M GENMASK(4, 0)
+#define IDPF_FLEX_TXD_QW1_CMD_S 5
+#define IDPF_FLEX_TXD_QW1_CMD_M GENMASK(15, 5)
+ __le16 cmd_dtype;
+ /* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */
+ struct {
+ __le16 l2tag1;
+ __le16 l2tag2;
+ } l2tags;
+ __le16 buf_size;
+ } qw1;
+};
+
+struct idpf_flex_tx_sched_desc {
+ __le64 buf_addr; /* Packet buffer address */
+
+ /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE_16B (0x0C) */
+ struct {
+ u8 cmd_dtype;
+#define IDPF_TXD_FLEX_FLOW_DTYPE_M GENMASK(4, 0)
+#define IDPF_TXD_FLEX_FLOW_CMD_EOP BIT(5)
+#define IDPF_TXD_FLEX_FLOW_CMD_CS_EN BIT(6)
+#define IDPF_TXD_FLEX_FLOW_CMD_RE BIT(7)
+
+ /* [23:23] Horizon Overflow bit, [22:0] timestamp */
+ u8 ts[3];
+#define IDPF_TXD_FLOW_SCH_HORIZON_OVERFLOW_M BIT(7)
+
+ __le16 compl_tag;
+ __le16 rxr_bufsize;
+#define IDPF_TXD_FLEX_FLOW_RXR BIT(14)
+#define IDPF_TXD_FLEX_FLOW_BUFSIZE_M GENMASK(13, 0)
+ } qw1;
+};
+
+/* Common cmd fields for all flex context descriptors
+ * Note: these defines already account for the 5 bit dtype in the cmd_dtype
+ * field
+ */
+enum idpf_tx_flex_ctx_desc_cmd_bits {
+ IDPF_TX_FLEX_CTX_DESC_CMD_TSO = BIT(5),
+ IDPF_TX_FLEX_CTX_DESC_CMD_TSYN_EN = BIT(6),
+ IDPF_TX_FLEX_CTX_DESC_CMD_L2TAG2 = BIT(7),
+ IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK = BIT(9),
+ IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_LOCAL = BIT(10),
+ IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI = GENMASK(10, 9),
+};
+
+/* Standard flex descriptor TSO context quad word */
+struct idpf_flex_tx_tso_ctx_qw {
+ __le32 flex_tlen;
+#define IDPF_TXD_FLEX_CTX_TLEN_M GENMASK(17, 0)
+#define IDPF_TXD_FLEX_TSO_CTX_FLEX_S 24
+ __le16 mss_rt;
+#define IDPF_TXD_FLEX_CTX_MSS_RT_M GENMASK(13, 0)
+ u8 hdr_len;
+ u8 flex;
+};
+
+struct idpf_flex_tx_ctx_desc {
+ /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
+ struct {
+ struct idpf_flex_tx_tso_ctx_qw qw0;
+ struct {
+ __le16 cmd_dtype;
+ u8 flex[6];
+ } qw1;
+ } tso;
+};
+#endif /* _IDPF_LAN_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h
new file mode 100644
index 000000000000..3d73b6c76863
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_LAN_VF_REGS_H_
+#define _IDPF_LAN_VF_REGS_H_
+
+/* Reset */
+#define VFGEN_RSTAT 0x00008800
+#define VFGEN_RSTAT_VFR_STATE_S 0
+#define VFGEN_RSTAT_VFR_STATE_M GENMASK(1, 0)
+
+/* Control(VF Mailbox) Queue */
+#define VF_BASE 0x00006000
+
+#define VF_ATQBAL (VF_BASE + 0x1C00)
+#define VF_ATQBAH (VF_BASE + 0x1800)
+#define VF_ATQLEN (VF_BASE + 0x0800)
+#define VF_ATQLEN_ATQLEN_S 0
+#define VF_ATQLEN_ATQLEN_M GENMASK(9, 0)
+#define VF_ATQLEN_ATQVFE_S 28
+#define VF_ATQLEN_ATQVFE_M BIT(VF_ATQLEN_ATQVFE_S)
+#define VF_ATQLEN_ATQOVFL_S 29
+#define VF_ATQLEN_ATQOVFL_M BIT(VF_ATQLEN_ATQOVFL_S)
+#define VF_ATQLEN_ATQCRIT_S 30
+#define VF_ATQLEN_ATQCRIT_M BIT(VF_ATQLEN_ATQCRIT_S)
+#define VF_ATQLEN_ATQENABLE_S 31
+#define VF_ATQLEN_ATQENABLE_M BIT(VF_ATQLEN_ATQENABLE_S)
+#define VF_ATQH (VF_BASE + 0x0400)
+#define VF_ATQH_ATQH_S 0
+#define VF_ATQH_ATQH_M GENMASK(9, 0)
+#define VF_ATQT (VF_BASE + 0x2400)
+
+#define VF_ARQBAL (VF_BASE + 0x0C00)
+#define VF_ARQBAH (VF_BASE)
+#define VF_ARQLEN (VF_BASE + 0x2000)
+#define VF_ARQLEN_ARQLEN_S 0
+#define VF_ARQLEN_ARQLEN_M GENMASK(9, 0)
+#define VF_ARQLEN_ARQVFE_S 28
+#define VF_ARQLEN_ARQVFE_M BIT(VF_ARQLEN_ARQVFE_S)
+#define VF_ARQLEN_ARQOVFL_S 29
+#define VF_ARQLEN_ARQOVFL_M BIT(VF_ARQLEN_ARQOVFL_S)
+#define VF_ARQLEN_ARQCRIT_S 30
+#define VF_ARQLEN_ARQCRIT_M BIT(VF_ARQLEN_ARQCRIT_S)
+#define VF_ARQLEN_ARQENABLE_S 31
+#define VF_ARQLEN_ARQENABLE_M BIT(VF_ARQLEN_ARQENABLE_S)
+#define VF_ARQH (VF_BASE + 0x1400)
+#define VF_ARQH_ARQH_S 0
+#define VF_ARQH_ARQH_M GENMASK(12, 0)
+#define VF_ARQT (VF_BASE + 0x1000)
+
+/* Transmit queues */
+#define VF_QTX_TAIL_BASE 0x00000000
+#define VF_QTX_TAIL(_QTX) (VF_QTX_TAIL_BASE + (_QTX) * 0x4)
+#define VF_QTX_TAIL_EXT_BASE 0x00040000
+#define VF_QTX_TAIL_EXT(_QTX) (VF_QTX_TAIL_EXT_BASE + ((_QTX) * 4))
+
+/* Receive queues */
+#define VF_QRX_TAIL_BASE 0x00002000
+#define VF_QRX_TAIL(_QRX) (VF_QRX_TAIL_BASE + ((_QRX) * 4))
+#define VF_QRX_TAIL_EXT_BASE 0x00050000
+#define VF_QRX_TAIL_EXT(_QRX) (VF_QRX_TAIL_EXT_BASE + ((_QRX) * 4))
+#define VF_QRXB_TAIL_BASE 0x00060000
+#define VF_QRXB_TAIL(_QRX) (VF_QRXB_TAIL_BASE + ((_QRX) * 4))
+
+/* Interrupts */
+#define VF_INT_DYN_CTL0 0x00005C00
+#define VF_INT_DYN_CTL0_INTENA_S 0
+#define VF_INT_DYN_CTL0_INTENA_M BIT(VF_INT_DYN_CTL0_INTENA_S)
+#define VF_INT_DYN_CTL0_ITR_INDX_S 3
+#define VF_INT_DYN_CTL0_ITR_INDX_M GENMASK(4, 3)
+#define VF_INT_DYN_CTLN(_INT) (0x00003800 + ((_INT) * 4))
+#define VF_INT_DYN_CTLN_EXT(_INT) (0x00070000 + ((_INT) * 4))
+#define VF_INT_DYN_CTLN_INTENA_S 0
+#define VF_INT_DYN_CTLN_INTENA_M BIT(VF_INT_DYN_CTLN_INTENA_S)
+#define VF_INT_DYN_CTLN_CLEARPBA_S 1
+#define VF_INT_DYN_CTLN_CLEARPBA_M BIT(VF_INT_DYN_CTLN_CLEARPBA_S)
+#define VF_INT_DYN_CTLN_SWINT_TRIG_S 2
+#define VF_INT_DYN_CTLN_SWINT_TRIG_M BIT(VF_INT_DYN_CTLN_SWINT_TRIG_S)
+#define VF_INT_DYN_CTLN_ITR_INDX_S 3
+#define VF_INT_DYN_CTLN_ITR_INDX_M GENMASK(4, 3)
+#define VF_INT_DYN_CTLN_INTERVAL_S 5
+#define VF_INT_DYN_CTLN_INTERVAL_M BIT(VF_INT_DYN_CTLN_INTERVAL_S)
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S 24
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S)
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_S 25
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_S)
+#define VF_INT_DYN_CTLN_WB_ON_ITR_S 30
+#define VF_INT_DYN_CTLN_WB_ON_ITR_M BIT(VF_INT_DYN_CTLN_WB_ON_ITR_S)
+#define VF_INT_DYN_CTLN_INTENA_MSK_S 31
+#define VF_INT_DYN_CTLN_INTENA_MSK_M BIT(VF_INT_DYN_CTLN_INTENA_MSK_S)
+/* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is spacing
+ * b/w itrn registers of the same vector
+ */
+#define VF_INT_ITR0(_ITR) (0x00004C00 + ((_ITR) * 4))
+#define VF_INT_ITRN_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \
+ ((_reg_start) + ((_ITR) * (_itrn_indx_spacing)))
+/* For VF with 16 vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing
+ * is 0x40 and base register offset is 0x00002800
+ */
+#define VF_INT_ITRN(_INT, _ITR) \
+ (0x00002800 + ((_INT) * 4) + ((_ITR) * 0x40))
+/* For VF with 64 vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing
+ * is 0x100 and base register offset is 0x00002C00
+ */
+#define VF_INT_ITRN_64(_INT, _ITR) \
+ (0x00002C00 + ((_INT) * 4) + ((_ITR) * 0x100))
+/* For VF with 2k vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing
+ * is 0x2000 and base register offset is 0x00072000
+ */
+#define VF_INT_ITRN_2K(_INT, _ITR) \
+ (0x00072000 + ((_INT) * 4) + ((_ITR) * 0x2000))
+#define VF_INT_ITRN_MAX_INDEX 2
+#define VF_INT_ITRN_INTERVAL_S 0
+#define VF_INT_ITRN_INTERVAL_M GENMASK(11, 0)
+#define VF_INT_PBA_CLEAR 0x00008900
+
+#define VF_INT_ICR0_ENA1 0x00005000
+#define VF_INT_ICR0_ENA1_ADMINQ_S 30
+#define VF_INT_ICR0_ENA1_ADMINQ_M BIT(VF_INT_ICR0_ENA1_ADMINQ_S)
+#define VF_INT_ICR0_ENA1_RSVD_S 31
+#define VF_INT_ICR01 0x00004800
+#define VF_QF_HENA(_i) (0x0000C400 + ((_i) * 4))
+#define VF_QF_HENA_MAX_INDX 1
+#define VF_QF_HKEY(_i) (0x0000CC00 + ((_i) * 4))
+#define VF_QF_HKEY_MAX_INDX 12
+#define VF_QF_HLUT(_i) (0x0000D000 + ((_i) * 4))
+#define VF_QF_HLUT_MAX_INDX 15
+#endif
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
new file mode 100644
index 000000000000..19809b0ddcd9
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -0,0 +1,2379 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+
+static const struct net_device_ops idpf_netdev_ops_splitq;
+static const struct net_device_ops idpf_netdev_ops_singleq;
+
+const char * const idpf_vport_vc_state_str[] = {
+ IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
+};
+
+/**
+ * idpf_init_vector_stack - Fill the MSIX vector stack with vector index
+ * @adapter: private data struct
+ *
+ * Return 0 on success, error on failure
+ */
+static int idpf_init_vector_stack(struct idpf_adapter *adapter)
+{
+ struct idpf_vector_lifo *stack;
+ u16 min_vec;
+ u32 i;
+
+ mutex_lock(&adapter->vector_lock);
+ min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
+ stack = &adapter->vector_stack;
+ stack->size = adapter->num_msix_entries;
+ /* Set the base and top to point at the start of the 'free pool' so
+ * the unused vectors can be distributed on an on-demand basis.
+ */
+ stack->base = min_vec;
+ stack->top = min_vec;
+
+ stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
+ if (!stack->vec_idx) {
+ mutex_unlock(&adapter->vector_lock);
+
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < stack->size; i++)
+ stack->vec_idx[i] = i;
+
+ mutex_unlock(&adapter->vector_lock);
+
+ return 0;
+}
+
+/**
+ * idpf_deinit_vector_stack - zero out the MSIX vector stack
+ * @adapter: private data struct
+ */
+static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
+{
+ struct idpf_vector_lifo *stack;
+
+ mutex_lock(&adapter->vector_lock);
+ stack = &adapter->vector_stack;
+ kfree(stack->vec_idx);
+ stack->vec_idx = NULL;
+ mutex_unlock(&adapter->vector_lock);
+}
+
+/**
+ * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
+ * @adapter: adapter structure
+ *
+ * This will also disable interrupt mode and queue up the mailbox task. The
+ * mailbox task will reschedule itself if not in interrupt mode.
+ */
+static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
+{
+ clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
+ free_irq(adapter->msix_entries[0].vector, adapter);
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+}
+
+/**
+ * idpf_intr_rel - Release interrupt capabilities and free memory
+ * @adapter: adapter to disable interrupts on
+ */
+void idpf_intr_rel(struct idpf_adapter *adapter)
+{
+ int err;
+
+ if (!adapter->msix_entries)
+ return;
+
+ idpf_mb_intr_rel_irq(adapter);
+ pci_free_irq_vectors(adapter->pdev);
+
+ err = idpf_send_dealloc_vectors_msg(adapter);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to deallocate vectors: %d\n", err);
+
+ idpf_deinit_vector_stack(adapter);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+/**
+ * idpf_mb_intr_clean - Interrupt handler for the mailbox
+ * @irq: interrupt number
+ * @data: pointer to the adapter structure
+ */
+static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
+{
+ struct idpf_adapter *adapter = (struct idpf_adapter *)data;
+
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
+ * @adapter: adapter to get the hardware address for register write
+ */
+static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
+{
+ struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
+ u32 val;
+
+ val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m;
+ writel(val, intr->dyn_ctl);
+ writel(intr->icr_ena_ctlq_m, intr->icr_ena);
+}
+
+/**
+ * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt
+ * @adapter: adapter structure to pass to the mailbox irq handler
+ */
+static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
+{
+ struct idpf_q_vector *mb_vector = &adapter->mb_vector;
+ int irq_num, mb_vidx = 0, err;
+
+ irq_num = adapter->msix_entries[mb_vidx].vector;
+ mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
+ dev_driver_string(&adapter->pdev->dev),
+ "Mailbox", mb_vidx);
+ err = request_irq(irq_num, adapter->irq_mb_handler, 0,
+ mb_vector->name, adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "IRQ request for mailbox failed, error: %d\n", err);
+
+ return err;
+ }
+
+ set_bit(IDPF_MB_INTR_MODE, adapter->flags);
+
+ return 0;
+}
+
+/**
+ * idpf_set_mb_vec_id - Set vector index for mailbox
+ * @adapter: adapter structure to access the vector chunks
+ *
+ * The first vector ID in the requested vector chunks from the CP is for
+ * the mailbox.
+ */
+static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
+{
+ if (adapter->req_vec_chunks)
+ adapter->mb_vector.v_idx =
+ le16_to_cpu(adapter->caps.mailbox_vector_id);
+ else
+ adapter->mb_vector.v_idx = 0;
+}
+
+/**
+ * idpf_mb_intr_init - Initialize the mailbox interrupt
+ * @adapter: adapter structure to store the mailbox vector
+ */
+static int idpf_mb_intr_init(struct idpf_adapter *adapter)
+{
+ adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
+ adapter->irq_mb_handler = idpf_mb_intr_clean;
+
+ return idpf_mb_intr_req_irq(adapter);
+}
+
+/**
+ * idpf_vector_lifo_push - push MSIX vector index onto stack
+ * @adapter: private data struct
+ * @vec_idx: vector index to store
+ */
+static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx)
+{
+ struct idpf_vector_lifo *stack = &adapter->vector_stack;
+
+ lockdep_assert_held(&adapter->vector_lock);
+
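+ /* 'top' moves back toward 'base' as vectors are returned; top == base
+ * means the free pool already holds every vector.
+ */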
+ if (stack->top == stack->base) {
+ dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n",
+ stack->top);
+ return -EINVAL;
+ }
+
+ stack->vec_idx[--stack->top] = vec_idx;
+
+ return 0;
+}
+
+/**
+ * idpf_vector_lifo_pop - pop MSIX vector index from stack
+ * @adapter: private data struct
+ */
+static int idpf_vector_lifo_pop(struct idpf_adapter *adapter)
+{
+ struct idpf_vector_lifo *stack = &adapter->vector_stack;
+
+ lockdep_assert_held(&adapter->vector_lock);
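+ /* 'top' moves toward 'size' as vectors are handed out; top == size
+ * means the free pool is exhausted.
+ */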
+
+ if (stack->top == stack->size) {
+ dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");
+
+ return -EINVAL;
+ }
+
+ return stack->vec_idx[stack->top++];
+}
+
+/**
+ * idpf_vector_stash - Store the vector indexes onto the stack
+ * @adapter: private data struct
+ * @q_vector_idxs: vector index array
+ * @vec_info: info related to the number of vectors
+ *
+ * This function is a no-op if there are no vector indexes to be stashed.
+ */
+static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
+ struct idpf_vector_info *vec_info)
+{
+ int i, base = 0;
+ u16 vec_idx;
+
+ lockdep_assert_held(&adapter->vector_lock);
+
+ if (!vec_info->num_curr_vecs)
+ return;
+
+ /* For default vports, no need to stash vector allocated from the
+ * default pool onto the stack
+ */
+ if (vec_info->default_vport)
+ base = IDPF_MIN_Q_VEC;
+
+ for (i = vec_info->num_curr_vecs - 1; i >= base; i--) {
+ vec_idx = q_vector_idxs[i];
+ idpf_vector_lifo_push(adapter, vec_idx);
+ adapter->num_avail_msix++;
+ }
+}
+
+/**
+ * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
+ * @adapter: driver specific private structure
+ * @q_vector_idxs: vector index array
+ * @vec_info: info related to the number of vectors
+ *
+ * This is the core function to distribute the MSIX vectors acquired from the
+ * OS. It expects the caller to pass the number of vectors required as well
+ * as the number previously allocated. First, it stashes the previously
+ * allocated vector indexes onto the stack and then figures out if it can
+ * allocate the requested vectors. It may block on acquiring the mutex lock.
+ * If the caller passes 0 as the number of requested vectors, this function
+ * just stashes the already allocated vectors and returns 0.
+ *
+ * Returns the actual number of vectors allocated on success, error value on
+ * failure. A return of 0 implies the stack has no vectors to allocate, which
+ * is also a failure case for the caller.
+ */
+int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
+ u16 *q_vector_idxs,
+ struct idpf_vector_info *vec_info)
+{
+ u16 num_req_vecs, num_alloc_vecs = 0, max_vecs;
+ struct idpf_vector_lifo *stack;
+ int i, j, vecid;
+
+ mutex_lock(&adapter->vector_lock);
+ stack = &adapter->vector_stack;
+ num_req_vecs = vec_info->num_req_vecs;
+
+ /* Stash interrupt vector indexes onto the stack if required */
+ idpf_vector_stash(adapter, q_vector_idxs, vec_info);
+
+ if (!num_req_vecs)
+ goto rel_lock;
+
+ if (vec_info->default_vport) {
+ /* As IDPF_MIN_Q_VEC vectors per default vport are set aside in
+ * the default pool of the stack, use them for default vports.
+ */
+ j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC;
+ for (i = 0; i < IDPF_MIN_Q_VEC; i++) {
+ q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++];
+ num_req_vecs--;
+ }
+ }
+
+ /* Check if the stack has enough vectors to allocate */
+ max_vecs = min(adapter->num_avail_msix, num_req_vecs);
+
+ for (j = 0; j < max_vecs; j++) {
+ vecid = idpf_vector_lifo_pop(adapter);
+ q_vector_idxs[num_alloc_vecs++] = vecid;
+ }
+ adapter->num_avail_msix -= max_vecs;
+
+rel_lock:
+ mutex_unlock(&adapter->vector_lock);
+
+ return num_alloc_vecs;
+}
+
+/**
+ * idpf_intr_req - Request interrupt capabilities
+ * @adapter: adapter to enable interrupts on
+ *
+ * Returns 0 on success, negative on failure
+ */
+int idpf_intr_req(struct idpf_adapter *adapter)
+{
+ u16 default_vports = idpf_get_default_vports(adapter);
+ int num_q_vecs, total_vecs, num_vec_ids;
+ int min_vectors, v_actual, err;
+ unsigned int vector;
+ u16 *vecids;
+
+ total_vecs = idpf_get_reserved_vecs(adapter);
+ num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
+
+ err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to allocate %d vectors: %d\n", num_q_vecs, err);
+
+ return -EAGAIN;
+ }
+
+ min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
+ v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
+ total_vecs, PCI_IRQ_MSIX);
+ if (v_actual < min_vectors) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
+ v_actual);
+ err = -EAGAIN;
+ goto send_dealloc_vecs;
+ }
+
+ adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
+ GFP_KERNEL);
+
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+
+ idpf_set_mb_vec_id(adapter);
+
+ vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
+ if (!vecids) {
+ err = -ENOMEM;
+ goto free_msix;
+ }
+
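+ /* Translate to device-absolute vector IDs: use the IDs from the vector
+ * chunks supplied by the Control Plane when available, otherwise fall
+ * back to a 1:1 index mapping.
+ */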
+ if (adapter->req_vec_chunks) {
+ struct virtchnl2_vector_chunks *vchunks;
+ struct virtchnl2_alloc_vectors *ac;
+
+ ac = adapter->req_vec_chunks;
+ vchunks = &ac->vchunks;
+
+ num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
+ vchunks);
+ if (num_vec_ids < v_actual) {
+ err = -EINVAL;
+ goto free_vecids;
+ }
+ } else {
+ int i;
+
+ for (i = 0; i < v_actual; i++)
+ vecids[i] = i;
+ }
+
+ for (vector = 0; vector < v_actual; vector++) {
+ adapter->msix_entries[vector].entry = vecids[vector];
+ adapter->msix_entries[vector].vector =
+ pci_irq_vector(adapter->pdev, vector);
+ }
+
+ adapter->num_req_msix = total_vecs;
+ adapter->num_msix_entries = v_actual;
+ /* 'num_avail_msix' is used to distribute excess vectors to the vports
+ * after considering the minimum vectors required for each default
+ * vport.
+ */
+ adapter->num_avail_msix = v_actual - min_vectors;
+
+ /* Fill MSIX vector lifo stack with vector indexes */
+ err = idpf_init_vector_stack(adapter);
+ if (err)
+ goto free_vecids;
+
+ err = idpf_mb_intr_init(adapter);
+ if (err)
+ goto deinit_vec_stack;
+ idpf_mb_irq_enable(adapter);
+ kfree(vecids);
+
+ return 0;
+
+deinit_vec_stack:
+ idpf_deinit_vector_stack(adapter);
+free_vecids:
+ kfree(vecids);
+free_msix:
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+free_irq:
+ pci_free_irq_vectors(adapter->pdev);
+send_dealloc_vecs:
+ idpf_send_dealloc_vectors_msg(adapter);
+
+ return err;
+}
+
+/**
+ * idpf_find_mac_filter - Search filter list for specific mac filter
+ * @vconfig: Vport config structure
+ * @macaddr: The MAC address
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_filter_list_lock.
+ **/
+static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig,
+ const u8 *macaddr)
+{
+ struct idpf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) {
+ if (ether_addr_equal(macaddr, f->macaddr))
+ return f;
+ }
+
+ return NULL;
+}
+
+/**
+ * __idpf_del_mac_filter - Delete a MAC filter from the filter list
+ * @vport_config: Vport config structure
+ * @macaddr: The MAC address
+ *
+ * Returns 0 on success, error value on failure
+ **/
+static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config,
+ const u8 *macaddr)
+{
+ struct idpf_mac_filter *f;
+
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+ f = idpf_find_mac_filter(vport_config, macaddr);
+ if (f) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return 0;
+}
+
+/**
+ * idpf_del_mac_filter - Delete a MAC filter from the filter list
+ * @vport: Main vport structure
+ * @np: Netdev private structure
+ * @macaddr: The MAC address
+ * @async: Don't wait for return message
+ *
+ * Removes filter from list and if interface is up, tells hardware about the
+ * removed filter.
+ **/
+static int idpf_del_mac_filter(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ const u8 *macaddr, bool async)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_mac_filter *f;
+
+ vport_config = np->adapter->vport_config[np->vport_idx];
+
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+ f = idpf_find_mac_filter(vport_config, macaddr);
+ if (f) {
+ f->remove = true;
+ } else {
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return -EINVAL;
+ }
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ if (np->state == __IDPF_VPORT_UP) {
+ int err;
+
+ err = idpf_add_del_mac_filters(vport, np, false, async);
+ if (err)
+ return err;
+ }
+
+ return __idpf_del_mac_filter(vport_config, macaddr);
+}
+
+/**
+ * __idpf_add_mac_filter - Add mac filter helper function
+ * @vport_config: Vport config structure
+ * @macaddr: Address to add
+ *
+ * Takes mac_filter_list_lock spinlock to add new filter to list.
+ */
+static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config,
+ const u8 *macaddr)
+{
+ struct idpf_mac_filter *f;
+
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ f = idpf_find_mac_filter(vport_config, macaddr);
+ if (f) {
+ f->remove = false;
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return 0;
+ }
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f) {
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(f->macaddr, macaddr);
+ list_add_tail(&f->list, &vport_config->user_config.mac_filter_list);
+ f->add = true;
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return 0;
+}
+
+/**
+ * idpf_add_mac_filter - Add a mac filter to the filter list
+ * @vport: Main vport structure
+ * @np: Netdev private structure
+ * @macaddr: The MAC address
+ * @async: Don't wait for return message
+ *
+ * Returns 0 on success or error on failure. If interface is up, we'll also
+ * send the virtchnl message to tell hardware about the filter.
+ **/
+static int idpf_add_mac_filter(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ const u8 *macaddr, bool async)
+{
+ struct idpf_vport_config *vport_config;
+ int err;
+
+ vport_config = np->adapter->vport_config[np->vport_idx];
+ err = __idpf_add_mac_filter(vport_config, macaddr);
+ if (err)
+ return err;
+
+ if (np->state == __IDPF_VPORT_UP)
+ err = idpf_add_del_mac_filters(vport, np, true, async);
+
+ return err;
+}
+
+/**
+ * idpf_del_all_mac_filters - Delete all MAC filters in list
+ * @vport: main vport struct
+ *
+ * Takes mac_filter_list_lock spinlock. Deletes all filters
+ */
+static void idpf_del_all_mac_filters(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_mac_filter *f, *ftmp;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list,
+ list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+}
+
+/**
+ * idpf_restore_mac_filters - Re-add all MAC filters in list
+ * @vport: main vport struct
+ *
+ * Takes mac_filter_list_lock spinlock. Sets the add field to true on all
+ * filters to resync them back to HW.
+ */
+static void idpf_restore_mac_filters(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_mac_filter *f;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
+ f->add = true;
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
+ true, false);
+}
+
+/**
+ * idpf_remove_mac_filters - Remove all MAC filters in list
+ * @vport: main vport struct
+ *
+ * Takes mac_filter_list_lock spinlock. Sets the remove field to true on all
+ * filters to remove them in HW.
+ */
+static void idpf_remove_mac_filters(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_mac_filter *f;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
+ f->remove = true;
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
+ false, false);
+}
+
+/**
+ * idpf_deinit_mac_addr - deinitialize mac address for vport
+ * @vport: main vport structure
+ */
+static void idpf_deinit_mac_addr(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_mac_filter *f;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ f = idpf_find_mac_filter(vport_config, vport->default_mac_addr);
+ if (f) {
+ list_del(&f->list);
+ kfree(f);
+ }
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+}
+
+/**
+ * idpf_init_mac_addr - initialize mac address for vport
+ * @vport: main vport structure
+ * @netdev: pointer to netdev struct associated with this vport
+ */
+static int idpf_init_mac_addr(struct idpf_vport *vport,
+ struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_adapter *adapter = vport->adapter;
+ int err;
+
+ if (is_valid_ether_addr(vport->default_mac_addr)) {
+ eth_hw_addr_set(netdev, vport->default_mac_addr);
+ ether_addr_copy(netdev->perm_addr, vport->default_mac_addr);
+
+ return idpf_add_mac_filter(vport, np, vport->default_mac_addr,
+ false);
+ }
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_MACFILTER)) {
+ dev_err(&adapter->pdev->dev,
+ "MAC address is not provided and capability is not set\n");
+
+ return -EINVAL;
+ }
+
+ eth_hw_addr_random(netdev);
+ err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false);
+ if (err)
+ return err;
+
+ dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n",
+ vport->default_mac_addr, netdev->dev_addr);
+ ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
+
+ return 0;
+}
+
+/**
+ * idpf_cfg_netdev - Allocate, configure and register a netdev
+ * @vport: main vport structure
+ *
+ * Returns 0 on success, negative value on failure.
+ */
+static int idpf_cfg_netdev(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ netdev_features_t dflt_features;
+ netdev_features_t offloads = 0;
+ struct idpf_netdev_priv *np;
+ struct net_device *netdev;
+ u16 idx = vport->idx;
+ int err;
+
+ vport_config = adapter->vport_config[idx];
+
+ /* It's possible we already have a netdev allocated and registered for
+ * this vport
+ */
+ if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) {
+ netdev = adapter->netdevs[idx];
+ np = netdev_priv(netdev);
+ np->vport = vport;
+ np->vport_idx = vport->idx;
+ np->vport_id = vport->vport_id;
+ vport->netdev = netdev;
+
+ return idpf_init_mac_addr(vport, netdev);
+ }
+
+ netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv),
+ vport_config->max_q.max_txq,
+ vport_config->max_q.max_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ vport->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vport = vport;
+ np->adapter = adapter;
+ np->vport_idx = vport->idx;
+ np->vport_id = vport->vport_id;
+
+ spin_lock_init(&np->stats_lock);
+
+ err = idpf_init_mac_addr(vport, netdev);
+ if (err) {
+ free_netdev(vport->netdev);
+ vport->netdev = NULL;
+
+ return err;
+ }
+
+ /* assign netdev_ops */
+ if (idpf_is_queue_model_split(vport->txq_model))
+ netdev->netdev_ops = &idpf_netdev_ops_splitq;
+ else
+ netdev->netdev_ops = &idpf_netdev_ops_singleq;
+
+ /* setup watchdog timeout value to be 5 seconds */
+ netdev->watchdog_timeo = 5 * HZ;
+
+ /* configure MTU limits */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = vport->max_mtu;
+
+ dflt_features = NETIF_F_SG |
+ NETIF_F_HIGHDMA;
+
+ if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
+ dflt_features |= NETIF_F_RXHASH;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
+ dflt_features |= NETIF_F_IP_CSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
+ dflt_features |= NETIF_F_IPV6_CSUM;
+ if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
+ dflt_features |= NETIF_F_RXCSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
+ dflt_features |= NETIF_F_SCTP_CRC;
+
+ if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
+ dflt_features |= NETIF_F_TSO;
+ if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
+ dflt_features |= NETIF_F_TSO6;
+ if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
+ VIRTCHNL2_CAP_SEG_IPV4_UDP |
+ VIRTCHNL2_CAP_SEG_IPV6_UDP))
+ dflt_features |= NETIF_F_GSO_UDP_L4;
+ if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
+ offloads |= NETIF_F_GRO_HW;
+ /* advertise to stack only if offloads for encapsulated packets are
+ * supported
+ */
+ if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
+ VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
+ offloads |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ 0;
+
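+ /* Tunnel checksums the HW cannot compute natively are advertised
+ * through NETIF_F_GSO_PARTIAL so the stack can fall back for them.
+ */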
+ if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
+ IDPF_CAP_TUNNEL_TX_CSUM))
+ netdev->gso_partial_features |=
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ offloads |= NETIF_F_TSO_MANGLEID;
+ }
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
+ offloads |= NETIF_F_LOOPBACK;
+
+ netdev->features |= dflt_features;
+ netdev->hw_features |= dflt_features | offloads;
+ netdev->hw_enc_features |= dflt_features | offloads;
+ idpf_set_ethtool_ops(netdev);
+ SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
+
+ /* carrier off on init to avoid Tx hangs */
+ netif_carrier_off(netdev);
+
+ /* make sure transmit queues start off as stopped */
+ netif_tx_stop_all_queues(netdev);
+
+ /* The vport can be arbitrarily released so we need to also track
+ * netdevs in the adapter struct
+ */
+ adapter->netdevs[idx] = netdev;
+
+ return 0;
+}
+
+/**
+ * idpf_get_free_slot - get the next non-NULL location index in array
+ * @adapter: adapter in which to look for a free vport slot
+ */
+static int idpf_get_free_slot(struct idpf_adapter *adapter)
+{
+ unsigned int i;
+
+ for (i = 0; i < adapter->max_vports; i++) {
+ if (!adapter->vports[i])
+ return i;
+ }
+
+ return IDPF_NO_FREE_SLOT;
+}
+
+/**
+ * idpf_remove_features - Turn off feature configs
+ * @vport: virtual port structure
+ */
+static void idpf_remove_features(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
+ idpf_remove_mac_filters(vport);
+}
+
+/**
+ * idpf_vport_stop - Disable a vport
+ * @vport: vport to disable
+ */
+static void idpf_vport_stop(struct idpf_vport *vport)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+
+ if (np->state <= __IDPF_VPORT_DOWN)
+ return;
+
+ netif_carrier_off(vport->netdev);
+ netif_tx_disable(vport->netdev);
+
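+ /* Quiesce the device: disable the vport and its queues, then unmap
+ * the queue vectors.
+ */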
+ idpf_send_disable_vport_msg(vport);
+ idpf_send_disable_queues_msg(vport);
+ idpf_send_map_unmap_queue_vector_msg(vport, false);
+ /* Normally we ask for queues in create_vport, but if the number of
+ * initially requested queues has changed, for example via ethtool
+ * set channels, we delete the queues and then add them back instead
+ * of deleting and reallocating the vport.
+ */
+ if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
+ idpf_send_delete_queues_msg(vport);
+
+ idpf_remove_features(vport);
+
+ vport->link_up = false;
+ idpf_vport_intr_deinit(vport);
+ idpf_vport_intr_rel(vport);
+ idpf_vport_queues_rel(vport);
+ np->state = __IDPF_VPORT_DOWN;
+}
+
+/**
+ * idpf_stop - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The stop entry point is called when an interface is de-activated by the OS,
+ * and the netdevice enters the DOWN state. The hardware is still under the
+ * driver's control, but the netdev interface is disabled.
+ *
+ * Returns success only - not allowed to fail
+ */
+static int idpf_stop(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+
+ if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
+ return 0;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ idpf_vport_stop(vport);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+}
+
+/**
+ * idpf_decfg_netdev - Unregister the netdev
+ * @vport: vport for which netdev to be unregistered
+ */
+static void idpf_decfg_netdev(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+
+ unregister_netdev(vport->netdev);
+ free_netdev(vport->netdev);
+ vport->netdev = NULL;
+
+ adapter->netdevs[vport->idx] = NULL;
+}
+
+/**
+ * idpf_vport_rel - Delete a vport and free its resources
+ * @vport: the vport being removed
+ */
+static void idpf_vport_rel(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct idpf_vector_info vec_info;
+ struct idpf_rss_data *rss_data;
+ struct idpf_vport_max_q max_q;
+ u16 idx = vport->idx;
+ int i;
+
+ vport_config = adapter->vport_config[vport->idx];
+ idpf_deinit_rss(vport);
+ rss_data = &vport_config->user_config.rss_data;
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = NULL;
+
+ idpf_send_destroy_vport_msg(vport);
+
+ /* Set all bits as we don't know which vc_state the vport vhnl_wq is
+ * waiting on, and wake up the virtchnl workqueue even if it is
+ * waiting for a response, as we are going down.
+ */
+ for (i = 0; i < IDPF_VC_NBITS; i++)
+ set_bit(i, vport->vc_state);
+ wake_up(&vport->vchnl_wq);
+
+ mutex_destroy(&vport->vc_buf_lock);
+
+ /* Clear all the bits */
+ for (i = 0; i < IDPF_VC_NBITS; i++)
+ clear_bit(i, vport->vc_state);
+
+ /* Release all max queues allocated to the adapter's pool */
+ max_q.max_rxq = vport_config->max_q.max_rxq;
+ max_q.max_txq = vport_config->max_q.max_txq;
+ max_q.max_bufq = vport_config->max_q.max_bufq;
+ max_q.max_complq = vport_config->max_q.max_complq;
+ idpf_vport_dealloc_max_qs(adapter, &max_q);
+
+ /* Release all the allocated vectors on the stack */
+ vec_info.num_req_vecs = 0;
+ vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.default_vport = vport->default_vport;
+
+ idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
+
+ kfree(vport->q_vector_idxs);
+ vport->q_vector_idxs = NULL;
+
+ kfree(adapter->vport_params_recvd[idx]);
+ adapter->vport_params_recvd[idx] = NULL;
+ kfree(adapter->vport_params_reqd[idx]);
+ adapter->vport_params_reqd[idx] = NULL;
+ if (adapter->vport_config[idx]) {
+ kfree(adapter->vport_config[idx]->req_qs_chunks);
+ adapter->vport_config[idx]->req_qs_chunks = NULL;
+ }
+ kfree(vport);
+ adapter->num_alloc_vports--;
+}
+
+/**
+ * idpf_vport_dealloc - cleanup and release a given vport
+ * @vport: pointer to idpf vport structure
+ *
+ * Returns nothing.
+ */
+static void idpf_vport_dealloc(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ unsigned int i = vport->idx;
+
+ idpf_deinit_mac_addr(vport);
+ idpf_vport_stop(vport);
+
+ if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
+ idpf_decfg_netdev(vport);
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ idpf_del_all_mac_filters(vport);
+
+ if (adapter->netdevs[i]) {
+ struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
+
+ np->vport = NULL;
+ }
+
+ idpf_vport_rel(vport);
+
+ adapter->vports[i] = NULL;
+ adapter->next_vport = idpf_get_free_slot(adapter);
+}
+
+/**
+ * idpf_vport_alloc - Allocates the next available struct vport in the adapter
+ * @adapter: board private structure
+ * @max_q: vport max queue info
+ *
+ * returns a pointer to a vport on success, NULL on failure.
+ */
+static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q)
+{
+ struct idpf_rss_data *rss_data;
+ u16 idx = adapter->next_vport;
+ struct idpf_vport *vport;
+ u16 num_max_q;
+
+ if (idx == IDPF_NO_FREE_SLOT)
+ return NULL;
+
+ vport = kzalloc(sizeof(*vport), GFP_KERNEL);
+ if (!vport)
+ return vport;
+
+ if (!adapter->vport_config[idx]) {
+ struct idpf_vport_config *vport_config;
+
+ vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
+ if (!vport_config) {
+ kfree(vport);
+
+ return NULL;
+ }
+
+ adapter->vport_config[idx] = vport_config;
+ }
+
+ vport->idx = idx;
+ vport->adapter = adapter;
+ vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET;
+ vport->default_vport = adapter->num_alloc_vports <
+ idpf_get_default_vports(adapter);
+
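+ /* Worst case each queue gets its own vector, so size the vector
+ * index array for the larger of the TX and RX queue counts
+ */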
+ num_max_q = max(max_q->max_txq, max_q->max_rxq);
+ vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
+ if (!vport->q_vector_idxs) {
+ kfree(vport);
+
+ return NULL;
+ }
+ idpf_vport_init(vport, max_q);
+
+ /* This alloc is done separate from the LUT because it's not strictly
+ * dependent on how many queues we have. If we change number of queues
+ * and soft reset we'll need a new LUT but the key can remain the same
+ * for as long as the vport exists.
+ */
+ rss_data = &adapter->vport_config[idx]->user_config.rss_data;
+ rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
+ if (!rss_data->rss_key) {
+ kfree(vport->q_vector_idxs);
+ kfree(vport);
+
+ return NULL;
+ }
+ /* Initialize default rss key */
+ netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
+
+ /* fill vport slot in the adapter struct */
+ adapter->vports[idx] = vport;
+ adapter->vport_ids[idx] = idpf_get_vport_id(vport);
+
+ adapter->num_alloc_vports++;
+ /* prepare adapter->next_vport for next use */
+ adapter->next_vport = idpf_get_free_slot(adapter);
+
+ return vport;
+}
+
+/**
+ * idpf_get_stats64 - get statistics for network device structure
+ * @netdev: network interface device structure
+ * @stats: main device statistics structure
+ */
+static void idpf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ spin_lock_bh(&np->stats_lock);
+ *stats = np->netstats;
+ spin_unlock_bh(&np->stats_lock);
+}
+
+/**
+ * idpf_statistics_task - Delayed task to get statistics over mailbox
+ * @work: work_struct handle to our data
+ */
+void idpf_statistics_task(struct work_struct *work)
+{
+ struct idpf_adapter *adapter;
+ int i;
+
+ adapter = container_of(work, struct idpf_adapter, stats_task.work);
+
+ for (i = 0; i < adapter->max_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+
+ if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
+ idpf_send_get_stats_msg(vport);
+ }
+
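+ /* Poll for fresh statistics over the mailbox every 10 seconds */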
+ queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
+ msecs_to_jiffies(10000));
+}
+
+/**
+ * idpf_mbx_task - Delayed task to handle mailbox responses
+ * @work: work_struct handle
+ */
+void idpf_mbx_task(struct work_struct *work)
+{
+ struct idpf_adapter *adapter;
+
+ adapter = container_of(work, struct idpf_adapter, mbx_task.work);
+
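+ /* In interrupt mode re-enable the mailbox interrupt; otherwise keep
+ * polling by re-queueing this task every 300 ms
+ */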
+ if (test_bit(IDPF_MB_INTR_MODE, adapter->flags))
+ idpf_mb_irq_enable(adapter);
+ else
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
+ msecs_to_jiffies(300));
+
+ idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
+}
+
+/**
+ * idpf_service_task - Delayed task to detect and handle HW resets
+ * @work: work_struct handle to our data
+ */
+void idpf_service_task(struct work_struct *work)
+{
+ struct idpf_adapter *adapter;
+
+ adapter = container_of(work, struct idpf_adapter, serv_task.work);
+
+ if (idpf_is_reset_detected(adapter) &&
+ !idpf_is_reset_in_prog(adapter) &&
+ !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
+ dev_info(&adapter->pdev->dev, "HW reset detected\n");
+ set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq,
+ &adapter->vc_event_task,
+ msecs_to_jiffies(10));
+ }
+
+ queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
+ msecs_to_jiffies(300));
+}
+
+/**
+ * idpf_restore_features - Restore feature configs
+ * @vport: virtual port structure
+ */
+static void idpf_restore_features(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
+ idpf_restore_mac_filters(vport);
+}
+
+/**
+ * idpf_set_real_num_queues - set number of queues for netdev
+ * @vport: virtual port structure
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_set_real_num_queues(struct idpf_vport *vport)
+{
+ int err;
+
+ err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
+ if (err)
+ return err;
+
+ return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
+}
+
+/**
+ * idpf_up_complete - Complete interface up sequence
+ * @vport: virtual port structure
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_up_complete(struct idpf_vport *vport)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+
+ if (vport->link_up && !netif_carrier_ok(vport->netdev)) {
+ netif_carrier_on(vport->netdev);
+ netif_tx_start_all_queues(vport->netdev);
+ }
+
+ np->state = __IDPF_VPORT_UP;
+
+ return 0;
+}
+
+/**
+ * idpf_rx_init_buf_tail - Write initial buffer ring tail value
+ * @vport: virtual port struct
+ */
+static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
+{
+ int i, j;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_queue *q =
+ &grp->splitq.bufq_sets[j].bufq;
+
+ writel(q->next_to_alloc, q->tail);
+ }
+ } else {
+ for (j = 0; j < grp->singleq.num_rxq; j++) {
+ struct idpf_queue *q =
+ grp->singleq.rxqs[j];
+
+ writel(q->next_to_alloc, q->tail);
+ }
+ }
+ }
+}
+
+/**
+ * idpf_vport_open - Bring up a vport
+ * @vport: vport to bring up
+ * @alloc_res: allocate queue resources
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ int err;
+
+ if (np->state != __IDPF_VPORT_DOWN)
+ return -EBUSY;
+
+ /* we do not allow interface up just yet */
+ netif_carrier_off(vport->netdev);
+
+ if (alloc_res) {
+ err = idpf_vport_queues_alloc(vport);
+ if (err)
+ return err;
+ }
+
+ err = idpf_vport_intr_alloc(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
+ vport->vport_id, err);
+ goto queues_rel;
+ }
+
+ err = idpf_vport_queue_ids_init(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
+ vport->vport_id, err);
+ goto intr_rel;
+ }
+
+ err = idpf_vport_intr_init(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
+ vport->vport_id, err);
+ goto intr_rel;
+ }
+
+ err = idpf_rx_bufs_init_all(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
+ vport->vport_id, err);
+ goto intr_rel;
+ }
+
+ err = idpf_queue_reg_init(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
+ vport->vport_id, err);
+ goto intr_rel;
+ }
+
+ idpf_rx_init_buf_tail(vport);
+
+ err = idpf_send_config_queues_msg(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
+ vport->vport_id, err);
+ goto intr_deinit;
+ }
+
+ err = idpf_send_map_unmap_queue_vector_msg(vport, true);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
+ vport->vport_id, err);
+ goto intr_deinit;
+ }
+
+ err = idpf_send_enable_queues_msg(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n",
+ vport->vport_id, err);
+ goto unmap_queue_vectors;
+ }
+
+ err = idpf_send_enable_vport_msg(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
+ vport->vport_id, err);
+ err = -EAGAIN;
+ goto disable_queues;
+ }
+
+ idpf_restore_features(vport);
+
+ vport_config = adapter->vport_config[vport->idx];
+ if (vport_config->user_config.rss_data.rss_lut)
+ err = idpf_config_rss(vport);
+ else
+ err = idpf_init_rss(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
+ vport->vport_id, err);
+ goto disable_vport;
+ }
+
+ err = idpf_up_complete(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
+ vport->vport_id, err);
+ goto deinit_rss;
+ }
+
+ return 0;
+
+deinit_rss:
+ idpf_deinit_rss(vport);
+disable_vport:
+ idpf_send_disable_vport_msg(vport);
+disable_queues:
+ idpf_send_disable_queues_msg(vport);
+unmap_queue_vectors:
+ idpf_send_map_unmap_queue_vector_msg(vport, false);
+intr_deinit:
+ idpf_vport_intr_deinit(vport);
+intr_rel:
+ idpf_vport_intr_rel(vport);
+queues_rel:
+ idpf_vport_queues_rel(vport);
+
+ return err;
+}
+
+/**
+ * idpf_init_task - Delayed initialization task
+ * @work: work_struct handle to our data
+ *
+ * Init task finishes up pending work started in probe. Due to the asynchronous
+ * nature in which the device communicates with hardware, we may have to wait
+ * several milliseconds to get a response. Instead of busy polling in probe,
+ * pulling it out into a delayed work task prevents us from bogging down the
+ * whole system waiting for a response from hardware.
+ */
+void idpf_init_task(struct work_struct *work)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_vport_max_q max_q;
+ struct idpf_adapter *adapter;
+ struct idpf_netdev_priv *np;
+ struct idpf_vport *vport;
+ u16 num_default_vports;
+ struct pci_dev *pdev;
+ bool default_vport;
+ int index, err;
+
+ adapter = container_of(work, struct idpf_adapter, init_task.work);
+
+ num_default_vports = idpf_get_default_vports(adapter);
+ if (adapter->num_alloc_vports < num_default_vports)
+ default_vport = true;
+ else
+ default_vport = false;
+
+ err = idpf_vport_alloc_max_qs(adapter, &max_q);
+ if (err)
+ goto unwind_vports;
+
+ err = idpf_send_create_vport_msg(adapter, &max_q);
+ if (err) {
+ idpf_vport_dealloc_max_qs(adapter, &max_q);
+ goto unwind_vports;
+ }
+
+ pdev = adapter->pdev;
+ vport = idpf_vport_alloc(adapter, &max_q);
+ if (!vport) {
+ err = -EFAULT;
+ dev_err(&pdev->dev, "failed to allocate vport: %d\n",
+ err);
+ idpf_vport_dealloc_max_qs(adapter, &max_q);
+ goto unwind_vports;
+ }
+
+ index = vport->idx;
+ vport_config = adapter->vport_config[index];
+
+ init_waitqueue_head(&vport->sw_marker_wq);
+ init_waitqueue_head(&vport->vchnl_wq);
+
+ mutex_init(&vport->vc_buf_lock);
+ spin_lock_init(&vport_config->mac_filter_list_lock);
+
+ INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
+
+ err = idpf_check_supported_desc_ids(vport);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get required descriptor ids\n");
+ goto cfg_netdev_err;
+ }
+
+ if (idpf_cfg_netdev(vport))
+ goto cfg_netdev_err;
+
+ err = idpf_send_get_rx_ptype_msg(vport);
+ if (err)
+ goto handle_err;
+
+ /* Once the state is set to DOWN, the driver is ready for dev_open */
+ np = netdev_priv(vport->netdev);
+ np->state = __IDPF_VPORT_DOWN;
+ if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
+ idpf_vport_open(vport, true);
+
+ /* Re-queue the init task until all the default vports have been
+ * created; the delay is staggered by the PCI function number
+ */
+ if (adapter->num_alloc_vports < num_default_vports) {
+ queue_delayed_work(adapter->init_wq, &adapter->init_task,
+ msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
+
+ return;
+ }
+
+ for (index = 0; index < adapter->max_vports; index++) {
+ if (adapter->netdevs[index] &&
+ !test_bit(IDPF_VPORT_REG_NETDEV,
+ adapter->vport_config[index]->flags)) {
+ register_netdev(adapter->netdevs[index]);
+ set_bit(IDPF_VPORT_REG_NETDEV,
+ adapter->vport_config[index]->flags);
+ }
+ }
+
+ /* As all the required vports are created, clear the reset flag
+ * unconditionally here in case we were in reset and the link was down.
+ */
+ clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ /* Start the statistics task now */
+ queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
+ msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
+
+ return;
+
+handle_err:
+ idpf_decfg_netdev(vport);
+cfg_netdev_err:
+ idpf_vport_rel(vport);
+ adapter->vports[index] = NULL;
+unwind_vports:
+ if (default_vport) {
+ for (index = 0; index < adapter->max_vports; index++) {
+ if (adapter->vports[index])
+ idpf_vport_dealloc(adapter->vports[index]);
+ }
+ }
+ clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+}
+
+/**
+ * idpf_sriov_ena - Enable or change number of VFs
+ * @adapter: private data struct
+ * @num_vfs: number of VFs to allocate
+ */
+static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int err;
+
+ err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to allocate VFs: %d\n", err);
+
+ return err;
+ }
+
+ err = pci_enable_sriov(adapter->pdev, num_vfs);
+ if (err) {
+ idpf_send_set_sriov_vfs_msg(adapter, 0);
+ dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+
+ return err;
+ }
+
+ adapter->num_vfs = num_vfs;
+
+ return num_vfs;
+}
+
+/**
+ * idpf_sriov_configure - Configure the requested VFs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) {
+ dev_info(&pdev->dev, "SR-IOV is not supported on this device\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (num_vfs)
+ return idpf_sriov_ena(adapter, num_vfs);
+
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n");
+
+ return -EBUSY;
+ }
+
+ pci_disable_sriov(adapter->pdev);
+ idpf_send_set_sriov_vfs_msg(adapter, 0);
+ adapter->num_vfs = 0;
+
+ return 0;
+}
+
+/**
+ * idpf_deinit_task - Device deinit routine
+ * @adapter: Driver specific private structure
+ *
+ * Extended remove logic which will be used for
+ * hard reset as well
+ */
+void idpf_deinit_task(struct idpf_adapter *adapter)
+{
+ unsigned int i;
+
+ /* Wait until the init_task is done, else this thread might release
+ * resources the init path is still using and leave it in a bad state
+ */
+ cancel_delayed_work_sync(&adapter->init_task);
+
+ if (!adapter->vports)
+ return;
+
+ cancel_delayed_work_sync(&adapter->stats_task);
+
+ for (i = 0; i < adapter->max_vports; i++) {
+ if (adapter->vports[i])
+ idpf_vport_dealloc(adapter->vports[i]);
+ }
+}
+
+/**
+ * idpf_check_reset_complete - check that reset is complete
+ * @hw: pointer to hw struct
+ * @reset_reg: struct with reset registers
+ *
+ * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
+ **/
+static int idpf_check_reset_complete(struct idpf_hw *hw,
+ struct idpf_reset_reg *reset_reg)
+{
+ struct idpf_adapter *adapter = hw->back;
+ int i;
+
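+ /* Poll the reset status register for up to ~10-20 seconds
+ * (2000 iterations of 5-10 ms each)
+ */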
+ for (i = 0; i < 2000; i++) {
+ u32 reg_val = readl(reset_reg->rstat);
+
+ /* 0xFFFFFFFF might be read if the other side hasn't cleared the
+ * register for us yet; it is not a valid value for the register,
+ * so treat that as device-not-ready.
+ */
+ if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m))
+ return 0;
+
+ usleep_range(5000, 10000);
+ }
+
+ dev_warn(&adapter->pdev->dev, "Device reset timeout!\n");
+ /* Clear the reset flag unconditionally here since the reset
+ * technically isn't in progress anymore from the driver's perspective
+ */
+ clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+
+ return -EBUSY;
+}
+
+/**
+ * idpf_set_vport_state - Record which vports to bring back up after reset
+ * @adapter: Driver specific private structure
+ */
+static void idpf_set_vport_state(struct idpf_adapter *adapter)
+{
+ u16 i;
+
+ for (i = 0; i < adapter->max_vports; i++) {
+ struct idpf_netdev_priv *np;
+
+ if (!adapter->netdevs[i])
+ continue;
+
+ np = netdev_priv(adapter->netdevs[i]);
+ if (np->state == __IDPF_VPORT_UP)
+ set_bit(IDPF_VPORT_UP_REQUESTED,
+ adapter->vport_config[i]->flags);
+ }
+}
+
+/**
+ * idpf_init_hard_reset - Initiate a hardware reset
+ * @adapter: Driver specific private structure
+ *
+ * Deallocate the vports and all the resources associated with them and
+ * reallocate. Also reinitialize the mailbox. Return 0 on success,
+ * negative on failure.
+ */
+static int idpf_init_hard_reset(struct idpf_adapter *adapter)
+{
+ struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
+ struct device *dev = &adapter->pdev->dev;
+ struct net_device *netdev;
+ int err;
+ u16 i;
+
+ mutex_lock(&adapter->vport_ctrl_lock);
+
+ dev_info(dev, "Device HW Reset initiated\n");
+
+ /* Avoid TX hangs on reset */
+ for (i = 0; i < adapter->max_vports; i++) {
+ netdev = adapter->netdevs[i];
+ if (!netdev)
+ continue;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+ }
+
+ /* Prepare for reset */
+ if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
+ } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
+ bool is_reset = idpf_is_reset_detected(adapter);
+
+ idpf_set_vport_state(adapter);
+ idpf_vc_core_deinit(adapter);
+ if (!is_reset)
+ reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
+ idpf_deinit_dflt_mbx(adapter);
+ } else {
+ dev_err(dev, "Unhandled hard reset cause\n");
+ err = -EBADRQC;
+ goto unlock_mutex;
+ }
+
+ /* Wait for reset to complete */
+ err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg);
+ if (err) {
+ dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state= 0x%x\n",
+ adapter->state);
+ goto unlock_mutex;
+ }
+
+ /* Reset is complete and so start building the driver resources again */
+ err = idpf_init_dflt_mbx(adapter);
+ if (err) {
+ dev_err(dev, "Failed to initialize default mailbox: %d\n", err);
+ goto unlock_mutex;
+ }
+
+ /* Initialize the state machine, also allocate memory and request
+ * resources
+ */
+ err = idpf_vc_core_init(adapter);
+ if (err) {
+ idpf_deinit_dflt_mbx(adapter);
+ goto unlock_mutex;
+ }
+
+ /* Wait till all the vports are initialized to release the reset lock,
+ * else user space callbacks may access uninitialized vports
+ */
+ while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
+ msleep(100);
+
+unlock_mutex:
+ mutex_unlock(&adapter->vport_ctrl_lock);
+
+ return err;
+}
+
+/**
+ * idpf_vc_event_task - Handle virtchannel event logic
+ * @work: work queue struct
+ */
+void idpf_vc_event_task(struct work_struct *work)
+{
+ struct idpf_adapter *adapter;
+
+ adapter = container_of(work, struct idpf_adapter, vc_event_task.work);
+
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ return;
+
+ if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
+ test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ idpf_init_hard_reset(adapter);
+ }
+}
+
+/**
+ * idpf_initiate_soft_reset - Initiate a software reset
+ * @vport: virtual port data struct
+ * @reset_cause: reason for the soft reset
+ *
+ * Soft reset only reallocs vport queue resources. Returns 0 on success,
+ * negative on failure.
+ */
+int idpf_initiate_soft_reset(struct idpf_vport *vport,
+ enum idpf_vport_reset_cause reset_cause)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ enum idpf_vport_state current_state = np->state;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport *new_vport;
+ int err, i;
+
+ /* If the system is low on memory, we can end up in bad state if we
+ * free all the memory for queue resources and try to allocate them
+ * again. Instead, we can pre-allocate the new resources before doing
+ * anything and bail if the alloc fails.
+ *
+ * Make a clone of the existing vport to mimic its current
+ * configuration, then modify the new structure with any requested
+ * changes. Once the allocation of the new resources is done, stop the
+ * existing vport and copy the configuration to the main vport. If an
+ * error occurs, the existing vport is left untouched.
+ */
+ new_vport = kzalloc(sizeof(*vport), GFP_KERNEL);
+ if (!new_vport)
+ return -ENOMEM;
+
+ /* This purposely avoids copying the end of the struct because it
+ * contains wait_queues and mutexes and other stuff we don't want to
+ * mess with. Nothing below should use those variables from new_vport
+ * and should instead always refer to them in vport if they need to.
+ */
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
+
+ /* Adjust resource parameters prior to reallocating resources */
+ switch (reset_cause) {
+ case IDPF_SR_Q_CHANGE:
+ err = idpf_vport_adjust_qs(new_vport);
+ if (err)
+ goto free_vport;
+ break;
+ case IDPF_SR_Q_DESC_CHANGE:
+ /* Update queue parameters before allocating resources */
+ idpf_vport_calc_num_q_desc(new_vport);
+ break;
+ case IDPF_SR_MTU_CHANGE:
+ case IDPF_SR_RSC_CHANGE:
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n");
+ err = -EINVAL;
+ goto free_vport;
+ }
+
+ err = idpf_vport_queues_alloc(new_vport);
+ if (err)
+ goto free_vport;
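+
+ /* If the vport is already down, just ask HW to delete the old queues;
+ * otherwise flag the stop path to delete them while bringing the
+ * vport down
+ */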
+ if (current_state <= __IDPF_VPORT_DOWN) {
+ idpf_send_delete_queues_msg(vport);
+ } else {
+ set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
+ idpf_vport_stop(vport);
+ }
+
+ idpf_deinit_rss(vport);
+ /* We're passing in vport here because we need its wait_queue
+ * to send a message and it should be getting all the vport
+ * config data out of the adapter but we need to be careful not
+ * to add code to add_queues to change the vport config within
+ * vport itself as it will be wiped with a memcpy later.
+ */
+ err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
+ new_vport->num_complq,
+ new_vport->num_rxq,
+ new_vport->num_bufq);
+ if (err)
+ goto err_reset;
+
+ /* Same comment as above regarding avoiding copying the wait_queues and
+ * mutexes applies here. We do not want to mess with those if possible.
+ */
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
+
+ /* Since idpf_vport_queues_alloc was called with new_vport, the queue
+ * back pointers are currently pointing to the local new_vport. Reset
+ * the back pointers to the original vport here.
+ */
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ int j;
+
+ tx_qgrp->vport = vport;
+ for (j = 0; j < tx_qgrp->num_txq; j++)
+ tx_qgrp->txqs[j]->vport = vport;
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ tx_qgrp->complq->vport = vport;
+ }
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ struct idpf_queue *q;
+ u16 num_rxq;
+ int j;
+
+ rx_qgrp->vport = vport;
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
+ rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+ q->vport = vport;
+ }
+ }
+
+ if (reset_cause == IDPF_SR_Q_CHANGE)
+ idpf_vport_alloc_vec_indexes(vport);
+
+ err = idpf_set_real_num_queues(vport);
+ if (err)
+ goto err_reset;
+
+ if (current_state == __IDPF_VPORT_UP)
+ err = idpf_vport_open(vport, false);
+
+ kfree(new_vport);
+
+ return err;
+
+err_reset:
+ idpf_vport_queues_rel(new_vport);
+free_vport:
+ kfree(new_vport);
+
+ return err;
+}
+
+/**
+ * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * @netdev: the netdevice
+ * @addr: address to add
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
+ * meaning we cannot sleep in this context. Due to this, we have to add the
+ * filter and send the virtchnl message asynchronously without waiting for the
+ * response from the other side. We won't know whether or not the operation
+ * actually succeeded until we get the message back. Returns 0 on success,
+ * negative on failure.
+ */
+static int idpf_addr_sync(struct net_device *netdev, const u8 *addr)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ return idpf_add_mac_filter(np->vport, np, addr, true);
+}
+
+/**
+ * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * @netdev: the netdevice
+ * @addr: address to remove
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
+ * meaning we cannot sleep in this context. Due to this we have to delete the
+ * filter and send the virtchnl message asynchronously without waiting for the
+ * return from the other side. We won't know whether or not the operation
+ * actually succeeded until we get the message back. Always returns 0.
+ */
+static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ /* Under some circumstances, we might receive a request to delete
+ * our own device address from our uc list. Because we store the
+ * device address in the VSI's MAC filter list, we need to ignore
+ * such requests and not delete our device address from this list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
+ idpf_del_mac_filter(np->vport, np, addr, true);
+
+ return 0;
+}
+
+/**
+ * idpf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ *
+ * Stack takes addr_list_lock spinlock before calling our .set_rx_mode. We
+ * cannot sleep in this context.
+ */
+static void idpf_set_rx_mode(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *config_data;
+ struct idpf_adapter *adapter;
+ bool changed = false;
+ struct device *dev;
+ int err;
+
+ adapter = np->adapter;
+ dev = &adapter->pdev->dev;
+
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) {
+ __dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
+ __dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
+ }
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC))
+ return;
+
+ config_data = &adapter->vport_config[np->vport_idx]->user_config;
+ /* IFF_PROMISC enables both unicast and multicast promiscuous,
+ * while IFF_ALLMULTI only enables multicast such that:
+ *
+ * promisc + allmulti = unicast | multicast
+ * promisc + !allmulti = unicast | multicast
+ * !promisc + allmulti = multicast
+ */
+ if ((netdev->flags & IFF_PROMISC) &&
+ !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
+ changed = true;
+ dev_info(dev, "Entering promiscuous mode\n");
+ if (!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags))
+ dev_info(dev, "Entering multicast promiscuous mode\n");
+ }
+
+ if (!(netdev->flags & IFF_PROMISC) &&
+ test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
+ changed = true;
+ dev_info(dev, "Leaving promiscuous mode\n");
+ }
+
+ if (netdev->flags & IFF_ALLMULTI &&
+ !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
+ changed = true;
+ dev_info(dev, "Entering multicast promiscuous mode\n");
+ }
+
+ if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) &&
+ test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
+ changed = true;
+ dev_info(dev, "Leaving multicast promiscuous mode\n");
+ }
+
+ if (!changed)
+ return;
+
+ err = idpf_set_promiscuous(adapter, config_data, np->vport_id);
+ if (err)
+ dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
+}
+
+/**
+ * idpf_vport_manage_rss_lut - disable/enable RSS
+ * @vport: the vport being changed
+ *
+ * In the event of disable request for RSS, this function will zero out RSS
+ * LUT, while in the event of enable request for RSS, it will reconfigure RSS
+ * LUT with the default LUT configuration.
+ */
+static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
+{
+ bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
+ struct idpf_rss_data *rss_data;
+ u16 idx = vport->idx;
+ int lut_size;
+
+ rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
+ lut_size = rss_data->rss_lut_size * sizeof(u32);
+
+ if (ena) {
+ /* This will contain the default or user configured LUT */
+ memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
+ } else {
+ /* Save a copy of the current LUT to be restored later if
+ * requested.
+ */
+ memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
+
+ /* Zero out the current LUT to disable */
+ memset(rss_data->rss_lut, 0, lut_size);
+ }
+
+ return idpf_config_rss(vport);
+}
+
+/**
+ * idpf_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ */
+static int idpf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct idpf_adapter *adapter;
+ struct idpf_vport *vport;
+ int err = 0;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ adapter = vport->adapter;
+
+ if (idpf_is_reset_in_prog(adapter)) {
+ dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n");
+ err = -EBUSY;
+ goto unlock_mutex;
+ }
+
+ if (changed & NETIF_F_RXHASH) {
+ netdev->features ^= NETIF_F_RXHASH;
+ err = idpf_vport_manage_rss_lut(vport);
+ if (err)
+ goto unlock_mutex;
+ }
+
+ if (changed & NETIF_F_GRO_HW) {
+ netdev->features ^= NETIF_F_GRO_HW;
+ err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE);
+ if (err)
+ goto unlock_mutex;
+ }
+
+ if (changed & NETIF_F_LOOPBACK) {
+ netdev->features ^= NETIF_F_LOOPBACK;
+ err = idpf_send_ena_dis_loopback_msg(vport);
+ }
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_open - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog is enabled,
+ * and the stack is notified that the interface is ready.
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int idpf_open(struct net_device *netdev)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ err = idpf_vport_open(vport, true);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_change_mtu - NDO callback to change the MTU
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ netdev->mtu = new_mtu;
+
+ err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_features_check - Validate packet conforms to limits
+ * @skb: skb buffer
+ * @netdev: This port's netdev
+ * @features: Offload features that the stack believes apply
+ */
+static netdev_features_t idpf_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+ struct idpf_adapter *adapter = vport->adapter;
+ size_t len;
+
+ /* No point in doing any of this if neither checksum nor GSO are
+ * being requested for this frame. We can rule out both by just
+ * checking for CHECKSUM_PARTIAL
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ /* We cannot support GSO if the MSS is going to be less than
+ * 88 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_is_gso(skb) &&
+ (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
+ features &= ~NETIF_F_GSO_MASK;
+
+ /* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
+ len = skb_network_offset(skb);
+ if (unlikely(len & ~(126)))
+ goto unsupported;
+
+ len = skb_network_header_len(skb);
+ if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ goto unsupported;
+
+ if (!skb->encapsulation)
+ return features;
+
+ /* L4TUNLEN can support 127 words */
+ len = skb_inner_network_header(skb) - skb_transport_header(skb);
+ if (unlikely(len & ~(127 * 2)))
+ goto unsupported;
+
+ /* IPLEN can support at most 127 dwords */
+ len = skb_inner_network_header_len(skb);
+ if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ goto unsupported;
+
+ /* No need to validate L4LEN as TCP is the only protocol with a
+ * flexible value, and we support all possible values supported
+ * by TCP, which is at most 15 dwords
+ */
+
+ return features;
+
+unsupported:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
+/**
+ * idpf_set_mac - NDO callback to set port mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int idpf_set_mac(struct net_device *netdev, void *p)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_config *vport_config;
+ struct sockaddr *addr = p;
+ struct idpf_vport *vport;
+ int err = 0;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_MACFILTER)) {
+ dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n");
+ err = -EOPNOTSUPP;
+ goto unlock_mutex;
+ }
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n",
+ addr->sa_data);
+ err = -EADDRNOTAVAIL;
+ goto unlock_mutex;
+ }
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ goto unlock_mutex;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+ err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
+ if (err) {
+ __idpf_del_mac_filter(vport_config, addr->sa_data);
+ goto unlock_mutex;
+ }
+
+ if (is_valid_ether_addr(vport->default_mac_addr))
+ idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);
+
+ ether_addr_copy(vport->default_mac_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_alloc_dma_mem - Allocate dma memory
+ * @hw: pointer to hw struct
+ * @mem: pointer to dma_mem struct
+ * @size: size of the memory to allocate
+ *
+ * Returns the virtual address of the allocation, NULL on failure.
+ */
+void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
+{
+ struct idpf_adapter *adapter = hw->back;
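+ /* round the requested size up to a whole number of 4K pages */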
+ size_t sz = ALIGN(size, 4096);
+
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &mem->pa, GFP_KERNEL);
+ mem->size = sz;
+
+ return mem->va;
+}
+
+/**
+ * idpf_free_dma_mem - Free the allocated dma memory
+ * @hw: pointer to hw struct
+ * @mem: pointer to dma_mem struct
+ */
+void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
+{
+ struct idpf_adapter *adapter = hw->back;
+
+ dma_free_coherent(&adapter->pdev->dev, mem->size,
+ mem->va, mem->pa);
+ mem->size = 0;
+ mem->va = NULL;
+ mem->pa = 0;
+}
+
+static const struct net_device_ops idpf_netdev_ops_splitq = {
+ .ndo_open = idpf_open,
+ .ndo_stop = idpf_stop,
+ .ndo_start_xmit = idpf_tx_splitq_start,
+ .ndo_features_check = idpf_features_check,
+ .ndo_set_rx_mode = idpf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = idpf_set_mac,
+ .ndo_change_mtu = idpf_change_mtu,
+ .ndo_get_stats64 = idpf_get_stats64,
+ .ndo_set_features = idpf_set_features,
+ .ndo_tx_timeout = idpf_tx_timeout,
+};
+
+static const struct net_device_ops idpf_netdev_ops_singleq = {
+ .ndo_open = idpf_open,
+ .ndo_stop = idpf_stop,
+ .ndo_start_xmit = idpf_tx_singleq_start,
+ .ndo_features_check = idpf_features_check,
+ .ndo_set_rx_mode = idpf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = idpf_set_mac,
+ .ndo_change_mtu = idpf_change_mtu,
+ .ndo_get_stats64 = idpf_get_stats64,
+ .ndo_set_features = idpf_set_features,
+ .ndo_tx_timeout = idpf_tx_timeout,
+};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
new file mode 100644
index 000000000000..e1febc74cefd
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_devids.h"
+
+#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
+
+MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_LICENSE("GPL");
+
+/**
+ * idpf_remove - Device removal routine
+ * @pdev: PCI device information struct
+ */
+static void idpf_remove(struct pci_dev *pdev)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+ int i;
+
+ set_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
+
+ /* Wait until vc_event_task is done in case a hard reset is in
+ * progress; otherwise we may release resources while the thread
+ * doing the hard reset continues the init path and ends up in a
+ * bad state.
+ */
+ cancel_delayed_work_sync(&adapter->vc_event_task);
+ if (adapter->num_vfs)
+ idpf_sriov_configure(pdev, 0);
+
+ idpf_vc_core_deinit(adapter);
+ /* Be a good citizen and leave the device clean on exit */
+ adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
+ idpf_deinit_dflt_mbx(adapter);
+
+ if (!adapter->netdevs)
+ goto destroy_wqs;
+
+ /* There are some cases where it's possible to still have netdevs
+ * registered with the stack at this point, e.g. if the driver detected
+ * a HW reset and rmmod is called before it fully recovers. Unregister
+ * any stale netdevs here.
+ */
+ for (i = 0; i < adapter->max_vports; i++) {
+ if (!adapter->netdevs[i])
+ continue;
+ if (adapter->netdevs[i]->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(adapter->netdevs[i]);
+ free_netdev(adapter->netdevs[i]);
+ adapter->netdevs[i] = NULL;
+ }
+
+destroy_wqs:
+ destroy_workqueue(adapter->init_wq);
+ destroy_workqueue(adapter->serv_wq);
+ destroy_workqueue(adapter->mbx_wq);
+ destroy_workqueue(adapter->stats_wq);
+ destroy_workqueue(adapter->vc_event_wq);
+
+ for (i = 0; i < adapter->max_vports; i++) {
+ kfree(adapter->vport_config[i]);
+ adapter->vport_config[i] = NULL;
+ }
+ kfree(adapter->vport_config);
+ adapter->vport_config = NULL;
+ kfree(adapter->netdevs);
+ adapter->netdevs = NULL;
+
+ mutex_destroy(&adapter->vport_ctrl_lock);
+ mutex_destroy(&adapter->vector_lock);
+ mutex_destroy(&adapter->queue_lock);
+ mutex_destroy(&adapter->vc_buf_lock);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(adapter);
+}
+
+/**
+ * idpf_shutdown - PCI callback for shutting down device
+ * @pdev: PCI device information struct
+ */
+static void idpf_shutdown(struct pci_dev *pdev)
+{
+ idpf_remove(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+/**
+ * idpf_cfg_hw - Initialize HW struct
+ * @adapter: adapter to setup hw struct for
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_cfg_hw(struct idpf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct idpf_hw *hw = &adapter->hw;
+
+ hw->hw_addr = pcim_iomap_table(pdev)[0];
+ if (!hw->hw_addr) {
+ pci_err(pdev, "failed to allocate PCI iomap table\n");
+
+ return -ENOMEM;
+ }
+
+ hw->back = adapter;
+
+ return 0;
+}
+
+/**
+ * idpf_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in idpf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct idpf_adapter *adapter;
+ int err;
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return -ENOMEM;
+
+ adapter->req_tx_splitq = true;
+ adapter->req_rx_splitq = true;
+
+ switch (ent->device) {
+ case IDPF_DEV_ID_PF:
+ idpf_dev_ops_init(adapter);
+ break;
+ case IDPF_DEV_ID_VF:
+ idpf_vf_dev_ops_init(adapter);
+ adapter->crc_enable = true;
+ break;
+ default:
+ err = -ENODEV;
+ dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n",
+ ent->device);
+ goto err_free;
+ }
+
+ adapter->pdev = pdev;
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto err_free;
+
+ err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (err) {
+ pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err));
+
+ goto err_free;
+ }
+
+ /* set up for high or low DMA */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err) {
+ pci_err(pdev, "DMA configuration failed: %pe\n", ERR_PTR(err));
+
+ goto err_free;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, adapter);
+
+ adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!adapter->init_wq) {
+ dev_err(dev, "Failed to allocate init workqueue\n");
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!adapter->serv_wq) {
+ dev_err(dev, "Failed to allocate service workqueue\n");
+ err = -ENOMEM;
+ goto err_serv_wq_alloc;
+ }
+
+ adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!adapter->mbx_wq) {
+ dev_err(dev, "Failed to allocate mailbox workqueue\n");
+ err = -ENOMEM;
+ goto err_mbx_wq_alloc;
+ }
+
+ adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!adapter->stats_wq) {
+ dev_err(dev, "Failed to allocate workqueue\n");
+ err = -ENOMEM;
+ goto err_stats_wq_alloc;
+ }
+
+ adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!adapter->vc_event_wq) {
+ dev_err(dev, "Failed to allocate virtchnl event workqueue\n");
+ err = -ENOMEM;
+ goto err_vc_event_wq_alloc;
+ }
+
+ /* set up message level */
+ adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M);
+
+ err = idpf_cfg_hw(adapter);
+ if (err) {
+ dev_err(dev, "Failed to configure HW structure for adapter: %d\n",
+ err);
+ goto err_cfg_hw;
+ }
+
+ mutex_init(&adapter->vport_ctrl_lock);
+ mutex_init(&adapter->vector_lock);
+ mutex_init(&adapter->queue_lock);
+ mutex_init(&adapter->vc_buf_lock);
+
+ init_waitqueue_head(&adapter->vchnl_wq);
+
+ INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
+ INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
+ INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
+ INIT_DELAYED_WORK(&adapter->stats_task, idpf_statistics_task);
+ INIT_DELAYED_WORK(&adapter->vc_event_task, idpf_vc_event_task);
+
+ adapter->dev_ops.reg_ops.reset_reg_init(adapter);
+ set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
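+ /* The delay scales with the PCI function number (devfn & 7), which
+ * staggers initial bring-up across functions of the same device
+ */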
+ queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
+ msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
+
+ return 0;
+
+err_cfg_hw:
+ destroy_workqueue(adapter->vc_event_wq);
+err_vc_event_wq_alloc:
+ destroy_workqueue(adapter->stats_wq);
+err_stats_wq_alloc:
+ destroy_workqueue(adapter->mbx_wq);
+err_mbx_wq_alloc:
+ destroy_workqueue(adapter->serv_wq);
+err_serv_wq_alloc:
+ destroy_workqueue(adapter->init_wq);
+err_free:
+ kfree(adapter);
+ return err;
+}
+
+/* idpf_pci_tbl - PCI device ID table for the idpf driver */
+static const struct pci_device_id idpf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)},
+ { PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)},
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(pci, idpf_pci_tbl);
+
+static struct pci_driver idpf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = idpf_pci_tbl,
+ .probe = idpf_probe,
+ .sriov_configure = idpf_sriov_configure,
+ .remove = idpf_remove,
+ .shutdown = idpf_shutdown,
+};
+module_pci_driver(idpf_driver);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_mem.h b/drivers/net/ethernet/intel/idpf/idpf_mem.h
new file mode 100644
index 000000000000..b21a04fccf0f
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_mem.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_MEM_H_
+#define _IDPF_MEM_H_
+
+#include <linux/io.h>
+
+struct idpf_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ size_t size;
+};
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+
+#endif /* _IDPF_MEM_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
new file mode 100644
index 000000000000..81288a17da2a
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+
+/**
+ * idpf_tx_singleq_csum - Enable tx checksum offloads
+ * @skb: pointer to skb
+ * @off: pointer to struct that holds offload parameters
+ *
+ * Returns 1 if checksum offload was set up, 0 if no offload is needed (or
+ * the checksum was computed in software), negative on failure.
+ */
+static int idpf_tx_singleq_csum(struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ u32 l4_len, l3_len, l2_len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 offset, cmd = 0;
+ u8 l4_proto = 0;
+ __be16 frag_off;
+ bool is_tso;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* compute outer L2 header size */
+ l2_len = ip.hdr - skb->data;
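+ /* MACLEN is programmed in 2-byte words, hence the divide by two */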
+ offset = FIELD_PREP(0x3F << IDPF_TX_DESC_LEN_MACLEN_S, l2_len / 2);
+ is_tso = !!(off->tx_flags & IDPF_TX_FLAGS_TSO);
+ if (skb->encapsulation) {
+ u32 tunnel = 0;
+
+ /* define outer network header type */
+ if (off->tx_flags & IDPF_TX_FLAGS_IPV4) {
+ /* The stack computes the IP header already, the only
+ * time we need the hardware to recompute it is in the
+ * case of TSO.
+ */
+ tunnel |= is_tso ?
+ IDPF_TX_CTX_EXT_IP_IPV4 :
+ IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+
+ l4_proto = ip.v4->protocol;
+ } else if (off->tx_flags & IDPF_TX_FLAGS_IPV6) {
+ tunnel |= IDPF_TX_CTX_EXT_IP_IPV6;
+
+ l4_proto = ip.v6->nexthdr;
+ if (ipv6_ext_hdr(l4_proto))
+ ipv6_skip_exthdr(skb, skb_network_offset(skb) +
+ sizeof(*ip.v6),
+ &l4_proto, &frag_off);
+ }
+
+ /* define outer transport */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ tunnel |= IDPF_TXD_CTX_UDP_TUNNELING;
+ break;
+ case IPPROTO_GRE:
+ tunnel |= IDPF_TXD_CTX_GRE_TUNNELING;
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ l4.hdr = skb_inner_network_header(skb);
+ break;
+ default:
+ if (is_tso)
+ return -1;
+
+ skb_checksum_help(skb);
+
+ return 0;
+ }
+ off->tx_flags |= IDPF_TX_FLAGS_TUNNEL;
+
+ /* compute outer L3 header size */
+ tunnel |= FIELD_PREP(IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M,
+ (l4.hdr - ip.hdr) / 4);
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
+ /* compute tunnel header size */
+ tunnel |= FIELD_PREP(IDPF_TXD_CTX_QW0_TUNN_NATLEN_M,
+ (ip.hdr - l4.hdr) / 2);
+
+ /* indicate if we need to offload outer UDP header */
+ if (is_tso &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ tunnel |= IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M;
+
+ /* record tunnel offload values */
+ off->cd_tunneling |= tunnel;
+
+ /* switch L4 header pointer from outer to inner */
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = 0;
+
+ /* reset type as we transition from outer to inner headers */
+ off->tx_flags &= ~(IDPF_TX_FLAGS_IPV4 | IDPF_TX_FLAGS_IPV6);
+ if (ip.v4->version == 4)
+ off->tx_flags |= IDPF_TX_FLAGS_IPV4;
+ if (ip.v6->version == 6)
+ off->tx_flags |= IDPF_TX_FLAGS_IPV6;
+ }
+
+ /* Enable IP checksum offloads */
+ if (off->tx_flags & IDPF_TX_FLAGS_IPV4) {
+ l4_proto = ip.v4->protocol;
+ /* See comment above regarding need for HW to recompute IP
+ * header checksum in the case of TSO.
+ */
+ if (is_tso)
+ cmd |= IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ else
+ cmd |= IDPF_TX_DESC_CMD_IIPT_IPV4;
+
+ } else if (off->tx_flags & IDPF_TX_FLAGS_IPV6) {
+ cmd |= IDPF_TX_DESC_CMD_IIPT_IPV6;
+ l4_proto = ip.v6->nexthdr;
+ if (ipv6_ext_hdr(l4_proto))
+ ipv6_skip_exthdr(skb, skb_network_offset(skb) +
+ sizeof(*ip.v6), &l4_proto,
+ &frag_off);
+ } else {
+ return -1;
+ }
+
+ /* compute inner L3 header size */
+ l3_len = l4.hdr - ip.hdr;
+ offset |= (l3_len / 4) << IDPF_TX_DESC_LEN_IPLEN_S;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_TCP;
+ l4_len = l4.tcp->doff;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_UDP;
+ l4_len = sizeof(struct udphdr) >> 2;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_SCTP;
+ l4_len = sizeof(struct sctphdr) >> 2;
+ break;
+ default:
+ if (is_tso)
+ return -1;
+
+ skb_checksum_help(skb);
+
+ return 0;
+ }
+
+ offset |= l4_len << IDPF_TX_DESC_LEN_L4_LEN_S;
+ off->td_cmd |= cmd;
+ off->hdr_offsets |= offset;
+
+ return 1;
+}
+
+/**
+ * idpf_tx_singleq_map - Build the Tx base descriptor
+ * @tx_q: queue to send buffer on
+ * @first: first buffer info buffer to use
+ * @offloads: pointer to struct that holds offload parameters
+ *
+ * This function loops over the skb data pointed to by *first
+ * and gets a physical address for each memory location and programs
+ * it and the length into the transmit base mode descriptor.
+ */
+static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
+ struct idpf_tx_buf *first,
+ struct idpf_tx_offload_params *offloads)
+{
+ u32 offsets = offloads->hdr_offsets;
+ struct idpf_tx_buf *tx_buf = first;
+ struct idpf_base_tx_desc *tx_desc;
+ struct sk_buff *skb = first->skb;
+ u64 td_cmd = offloads->td_cmd;
+ unsigned int data_len, size;
+ u16 i = tx_q->next_to_use;
+ struct netdev_queue *nq;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u64 td_tag = 0;
+
+ data_len = skb->data_len;
+ size = skb_headlen(skb);
+
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, i);
+
+ dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
+
+ /* write each descriptor with CRC bit */
+ if (tx_q->vport->crc_enable)
+ td_cmd |= IDPF_TX_DESC_CMD_ICRC;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+
+ if (dma_mapping_error(tx_q->dev, dma))
+ return idpf_tx_dma_map_error(tx_q, skb, first, i);
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ /* align size to end of page */
+ max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+
+ /* account for data chunks larger than the hardware
+ * can handle
+ */
+ while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
+ tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd,
+ offsets,
+ max_data,
+ td_tag);
+ tx_desc++;
+ i++;
+
+ if (i == tx_q->desc_count) {
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ i = 0;
+ }
+
+ dma += max_data;
+ size -= max_data;
+
+ max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ }
+
+ if (!data_len)
+ break;
+
+ tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
+ size, td_tag);
+ tx_desc++;
+ i++;
+
+ if (i == tx_q->desc_count) {
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ i = 0;
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_buf = &tx_q->tx_buf[i];
+ }
+
+ skb_tx_timestamp(first->skb);
+
+ /* write last descriptor with RS and EOP bits */
+ td_cmd |= (u64)(IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS);
+
+ tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
+ size, td_tag);
+
+ IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ netdev_tx_sent_queue(nq, first->bytecount);
+
+ idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
+}
+
+/**
+ * idpf_tx_singleq_get_ctx_desc - grab next desc and update buffer ring
+ * @txq: queue to put context descriptor on
+ *
+ * Since the TX buffer ring mimics the descriptor ring, update the TX buffer
+ * ring entry to reflect that this index holds a context descriptor
+ */
+static struct idpf_base_tx_ctx_desc *
+idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
+{
+ struct idpf_base_tx_ctx_desc *ctx_desc;
+ int ntu = txq->next_to_use;
+
+ memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
+ txq->tx_buf[ntu].ctx_entry = true;
+
+ ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu);
+
+ IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
+ txq->next_to_use = ntu;
+
+ return ctx_desc;
+}
+
+/**
+ * idpf_tx_singleq_build_ctx_desc - populate context descriptor
+ * @txq: queue to send buffer on
+ * @offload: offload parameter structure
+ **/
+static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
+ struct idpf_tx_offload_params *offload)
+{
+ struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq);
+ u64 qw1 = (u64)IDPF_TX_DESC_DTYPE_CTX;
+
+ if (offload->tso_segs) {
+ qw1 |= IDPF_TX_CTX_DESC_TSO << IDPF_TXD_CTX_QW1_CMD_S;
+ qw1 |= ((u64)offload->tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) &
+ IDPF_TXD_CTX_QW1_TSO_LEN_M;
+ qw1 |= ((u64)offload->mss << IDPF_TXD_CTX_QW1_MSS_S) &
+ IDPF_TXD_CTX_QW1_MSS_M;
+
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.tx.lso_pkts);
+ u64_stats_update_end(&txq->stats_sync);
+ }
+
+ desc->qw0.tunneling_params = cpu_to_le32(offload->cd_tunneling);
+
+ desc->qw0.l2tag2 = 0;
+ desc->qw0.rsvd1 = 0;
+ desc->qw1 = cpu_to_le64(qw1);
+}
+
+/**
+ * idpf_tx_singleq_frame - Sends buffer on Tx ring using base descriptors
+ * @skb: send buffer
+ * @tx_q: queue to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ struct idpf_queue *tx_q)
+{
+ struct idpf_tx_offload_params offload = { };
+ struct idpf_tx_buf *first;
+ unsigned int count;
+ __be16 protocol;
+ int csum, tso;
+
+ count = idpf_tx_desc_count_required(tx_q, skb);
+ if (unlikely(!count))
+ return idpf_tx_drop_skb(tx_q, skb);
+
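+ /* Reserve room for a context descriptor and a cache line worth of
+ * padding descriptors on top of the data descriptors
+ */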
+ if (idpf_tx_maybe_stop_common(tx_q,
+ count + IDPF_TX_DESCS_PER_CACHE_LINE +
+ IDPF_TX_DESCS_FOR_CTX)) {
+ idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ protocol = vlan_get_protocol(skb);
+ if (protocol == htons(ETH_P_IP))
+ offload.tx_flags |= IDPF_TX_FLAGS_IPV4;
+ else if (protocol == htons(ETH_P_IPV6))
+ offload.tx_flags |= IDPF_TX_FLAGS_IPV6;
+
+ tso = idpf_tso(skb, &offload);
+ if (tso < 0)
+ goto out_drop;
+
+ csum = idpf_tx_singleq_csum(skb, &offload);
+ if (csum < 0)
+ goto out_drop;
+
+ if (tso || offload.cd_tunneling)
+ idpf_tx_singleq_build_ctx_desc(tx_q, &offload);
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_q->tx_buf[tx_q->next_to_use];
+ first->skb = skb;
+
+ if (tso) {
+ first->gso_segs = offload.tso_segs;
+ first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+ } else {
+ first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->gso_segs = 1;
+ }
+ idpf_tx_singleq_map(tx_q, first, &offload);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ return idpf_tx_drop_skb(tx_q, skb);
+}
+
+/**
+ * idpf_tx_singleq_start - Selects the right Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+ struct idpf_queue *tx_q;
+
+ tx_q = vport->txqs[skb_get_queue_mapping(skb)];
+
+ /* Hardware can't handle really short frames; hardware padding only
+ * works beyond this point
+ */
+ if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) {
+ idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+
+ return NETDEV_TX_OK;
+ }
+
+ return idpf_tx_singleq_frame(skb, tx_q);
+}
+
+/**
+ * idpf_tx_singleq_clean - Reclaim resources from queue
+ * @tx_q: Tx queue to clean
+ * @napi_budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns true if the clean completed within budget, false otherwise.
+ */
+static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
+ int *cleaned)
+{
+ unsigned int budget = tx_q->vport->compln_clean_budget;
+ unsigned int total_bytes = 0, total_pkts = 0;
+ struct idpf_base_tx_desc *tx_desc;
+ s16 ntc = tx_q->next_to_clean;
+ struct idpf_netdev_priv *np;
+ struct idpf_tx_buf *tx_buf;
+ struct idpf_vport *vport;
+ struct netdev_queue *nq;
+ bool dont_wake;
+
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc);
+ tx_buf = &tx_q->tx_buf[ntc];
+ ntc -= tx_q->desc_count;
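+ /* ntc is kept as a negative offset from the end of the ring so
+ * the wrap checks below are a cheap test against zero: it runs
+ * from -desc_count up to -1, and reaching 0 means we wrapped.
+ */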
+
+ do {
+ struct idpf_base_tx_desc *eop_desc;
+
+ /* If this entry in the ring was used as a context descriptor,
+ * its corresponding entry in the buffer ring will indicate as
+ * such. We can skip this descriptor since there is no buffer
+ * to clean.
+ */
+ if (tx_buf->ctx_entry) {
+ /* Clear this flag here to avoid stale flag values when
+ * this buffer is used for actual data in the future.
+ * There are cases where the tx_buf struct / the flags
+ * field will not be cleared before being reused.
+ */
+ tx_buf->ctx_entry = false;
+ goto fetch_next_txq_desc;
+ }
+
+ /* if next_to_watch is not set then no work pending */
+ eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->qw1 &
+ cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_pkts += tx_buf->gso_segs;
+
+ napi_consume_skb(tx_buf->skb, napi_budget);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buf data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buf++;
+ tx_desc++;
+ ntc++;
+ if (unlikely(!ntc)) {
+ ntc -= tx_q->desc_count;
+ tx_buf = tx_q->tx_buf;
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* update budget only if we did something */
+ budget--;
+
+fetch_next_txq_desc:
+ tx_buf++;
+ tx_desc++;
+ ntc++;
+ if (unlikely(!ntc)) {
+ ntc -= tx_q->desc_count;
+ tx_buf = tx_q->tx_buf;
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ }
+ } while (likely(budget));
+
+ ntc += tx_q->desc_count;
+ tx_q->next_to_clean = ntc;
+
+ *cleaned += total_pkts;
+
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts);
+ u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ vport = tx_q->vport;
+ np = netdev_priv(vport->netdev);
+ nq = netdev_get_tx_queue(vport->netdev, tx_q->idx);
+
+ dont_wake = np->state != __IDPF_VPORT_UP ||
+ !netif_carrier_ok(vport->netdev);
+ __netif_txq_completed_wake(nq, total_pkts, total_bytes,
+ IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
+ dont_wake);
+
+ return !!budget;
+}
+
+/**
+ * idpf_tx_singleq_clean_all - Clean all Tx queues
+ * @q_vec: queue vector
+ * @budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns false if clean is not complete else returns true
+ */
+static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
+ int *cleaned)
+{
+ u16 num_txq = q_vec->num_txq;
+ bool clean_complete = true;
+ int i, budget_per_q;
+
+ budget_per_q = num_txq ? max(budget / num_txq, 1) : 0;
+ for (i = 0; i < num_txq; i++) {
+ struct idpf_queue *q;
+
+ q = q_vec->tx[i];
+ clean_complete &= idpf_tx_singleq_clean(q, budget_per_q,
+ cleaned);
+ }
+
+ return clean_complete;
+}
+
+/**
+ * idpf_rx_singleq_test_staterr - tests bits in Rx descriptor
+ * status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * Rather than decoding individual fields, this simply masks the requested
+ * bits against the raw little-endian qword; the result is only ever used
+ * for boolean tests. The status_error_ptype_len field doesn't need to be
+ * shifted because it begins at offset zero.
+ */
+static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->base_wb.qword1.status_error_ptype_len &
+ cpu_to_le64(stat_err_bits));
+}
+
+/**
+ * idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers
+ * @rxq: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ * @ntc: next to clean
+ *
+ * Returns true if the descriptor is not the last one in a frame (non-EOP),
+ * false otherwise.
+ */
+static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
+ union virtchnl2_rx_desc *rx_desc,
+ struct sk_buff *skb, u16 ntc)
+{
+ /* if we are the last buffer then there is nothing else to do */
+ if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ)))
+ return false;
+
+ return true;
+}
+
+/**
+ * idpf_rx_singleq_csum - Indicate in skb if checksum is good
+ * @rxq: Rx ring being processed
+ * @skb: skb currently being received and modified
+ * @csum_bits: checksum bits from descriptor
+ * @ptype: the packet type decoded by hardware
+ *
+ * skb->protocol must be set before this function is called
+ */
+static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb,
+ struct idpf_rx_csum_decoded *csum_bits,
+ u16 ptype)
+{
+ struct idpf_rx_ptype_decoded decoded;
+ bool ipv4, ipv6;
+
+ /* check if Rx checksum is enabled */
+ if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM)))
+ return;
+
+ /* check if HW has decoded the packet and checksum */
+ if (unlikely(!(csum_bits->l3l4p)))
+ return;
+
+ decoded = rxq->vport->rx_ptype_lkup[ptype];
+ if (unlikely(!(decoded.known && decoded.outer_ip)))
+ return;
+
+ ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4);
+ ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+
+ /* Check if there were any checksum errors */
+ if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe)))
+ goto checksum_fail;
+
+ /* Device could not do any checksum offload for certain extension
+ * headers as indicated by setting IPV6EXADD bit
+ */
+ if (unlikely(ipv6 && csum_bits->ipv6exadd))
+ return;
+
+ /* check for L4 errors and handle packets that were not able to be
+ * checksummed due to arrival speed
+ */
+ if (unlikely(csum_bits->l4e))
+ goto checksum_fail;
+
+ if (unlikely(csum_bits->nat && csum_bits->eudpe))
+ goto checksum_fail;
+
+ /* Handle packets that were not able to be checksummed due to arrival
+ * speed, in this case the stack can compute the csum.
+ */
+ if (unlikely(csum_bits->pprs))
+ return;
+
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
+ */
+ if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case IDPF_RX_PTYPE_INNER_PROT_ICMP:
+ case IDPF_RX_PTYPE_INNER_PROT_TCP:
+ case IDPF_RX_PTYPE_INNER_PROT_UDP:
+ case IDPF_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ default:
+ return;
+ }
+
+checksum_fail:
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_update_end(&rxq->stats_sync);
+}
+
+/**
+ * idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum
+ * @rx_q: Rx completion queue
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: Rx packet type
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32-byte
+ * descriptor writeback format.
+ **/
+static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
+ struct sk_buff *skb,
+ union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
+{
+ struct idpf_rx_csum_decoded csum_bits;
+ u32 rx_error, rx_status;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
+
+ rx_status = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M, qword);
+ rx_error = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, qword);
+
+ csum_bits.ipe = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_M, rx_error);
+ csum_bits.eipe = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_M,
+ rx_error);
+ csum_bits.l4e = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_M, rx_error);
+ csum_bits.pprs = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_M,
+ rx_error);
+ csum_bits.l3l4p = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_M,
+ rx_status);
+ csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M,
+ rx_status);
+ csum_bits.nat = 0;
+ csum_bits.eudpe = 0;
+
+ idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+}
+
+/**
+ * idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum
+ * @rx_q: Rx completion queue
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: Rx packet type
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ **/
+static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
+ struct sk_buff *skb,
+ union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
+{
+ struct idpf_rx_csum_decoded csum_bits;
+ u16 rx_status0, rx_status1;
+
+ rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
+ rx_status1 = le16_to_cpu(rx_desc->flex_nic_wb.status_error1);
+
+ csum_bits.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_M,
+ rx_status0);
+ csum_bits.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_M,
+ rx_status0);
+ csum_bits.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_M,
+ rx_status0);
+ csum_bits.eudpe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_M,
+ rx_status0);
+ csum_bits.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_M,
+ rx_status0);
+ csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_M,
+ rx_status0);
+ csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M,
+ rx_status1);
+ csum_bits.pprs = 0;
+
+ idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+}
+
+/**
+ * idpf_rx_singleq_base_hash - set the hash value in the skb
+ * @rx_q: Rx completion queue
+ * @skb: skb currently being received and modified
+ * @rx_desc: specific descriptor
+ * @decoded: Decoded Rx packet type related fields
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32-byte
+ * descriptor writeback format.
+ **/
+static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
+ struct sk_buff *skb,
+ union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_ptype_decoded *decoded)
+{
+ u64 mask, qw1;
+
+ if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ return;
+
+ mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
+ qw1 = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
+
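+ /* FLTSTAT is a two-bit field and only the value with both bits
+ * set reports a valid RSS hash, hence the compare against the
+ * full mask rather than a simple bit test.
+ */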
+ if (FIELD_GET(mask, qw1) == mask) {
+ u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss);
+
+ skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+ }
+}
+
+/**
+ * idpf_rx_singleq_flex_hash - set the hash value in the skb
+ * @rx_q: Rx completion queue
+ * @skb: skb currently being received and modified
+ * @rx_desc: specific descriptor
+ * @decoded: Decoded Rx packet type related fields
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ **/
+static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
+ struct sk_buff *skb,
+ union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_ptype_decoded *decoded)
+{
+ if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ return;
+
+ if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.status_error0)))
+ skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash),
+ idpf_ptype_to_htype(decoded));
+}
+
+/**
+ * idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
+ * descriptor
+ * @rx_q: Rx ring being processed
+ * @skb: pointer to current skb being populated
+ * @rx_desc: descriptor for skb
+ * @ptype: packet type
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ */
+static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
+ struct sk_buff *skb,
+ union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
+{
+ struct idpf_rx_ptype_decoded decoded =
+ rx_q->vport->rx_ptype_lkup[ptype];
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_q->vport->netdev);
+
+ /* Check if we're using base mode descriptor IDs */
+ if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
+ idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded);
+ idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype);
+ } else {
+ idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded);
+ idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype);
+ }
+}
+
+/**
+ * idpf_rx_singleq_buf_hw_alloc_all - Replace used receive buffers
+ * @rx_q: queue for which the hw buffers are allocated
+ * @cleaned_count: number of buffers to replace
+ *
+ * Returns false if all allocations were successful, true if any fail
+ */
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
+ u16 cleaned_count)
+{
+ struct virtchnl2_singleq_rx_buf_desc *desc;
+ u16 nta = rx_q->next_to_alloc;
+ struct idpf_rx_buf *buf;
+
+ if (!cleaned_count)
+ return false;
+
+ desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
+ buf = &rx_q->rx_buf.buf[nta];
+
+ do {
+ dma_addr_t addr;
+
+ addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size);
+ if (unlikely(addr == DMA_MAPPING_ERROR))
+ break;
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ desc->pkt_addr = cpu_to_le64(addr);
+ desc->hdr_addr = 0;
+ desc++;
+
+ buf++;
+ nta++;
+ if (unlikely(nta == rx_q->desc_count)) {
+ desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
+ buf = rx_q->rx_buf.buf;
+ nta = 0;
+ }
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_q->next_to_alloc != nta) {
+ idpf_rx_buf_hw_update(rx_q, nta);
+ rx_q->next_to_alloc = nta;
+ }
+
+ return !!cleaned_count;
+}
+
+/**
+ * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
+ * @rx_q: Rx descriptor queue
+ * @rx_desc: the descriptor to process
+ * @fields: storage for extracted values
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size and Rx packet type.
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32-byte
+ * descriptor writeback format.
+ */
+static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
+ union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
+{
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
+
+ fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
+ fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
+}
+
+/**
+ * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
+ * @rx_q: Rx descriptor queue
+ * @rx_desc: the descriptor to process
+ * @fields: storage for extracted values
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size and Rx packet type.
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ */
+static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
+ union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
+{
+ fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
+ fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
+}
+
+/**
+ * idpf_rx_singleq_extract_fields - Extract fields from the Rx descriptor
+ * @rx_q: Rx descriptor queue
+ * @rx_desc: the descriptor to process
+ * @fields: storage for extracted values
+ *
+ */
+static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
+ union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
+{
+ if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
+ idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields);
+ else
+ idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields);
+}
+
+/**
+ * idpf_rx_singleq_clean - Reclaim resources after receive completes
+ * @rx_q: rx queue to clean
+ * @budget: Total limit on number of packets to process
+ *
+ * Returns the number of packets cleaned, or the full budget if a buffer
+ * allocation failure occurred (guaranteeing another pass through this
+ * routine)
+ */
+static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ struct sk_buff *skb = rx_q->skb;
+ u16 ntc = rx_q->next_to_clean;
+ u16 cleaned_count = 0;
+ bool failure = false;
+
+ /* Process Rx packets bounded by budget */
+ while (likely(total_rx_pkts < (unsigned int)budget)) {
+ struct idpf_rx_extracted fields = { };
+ union virtchnl2_rx_desc *rx_desc;
+ struct idpf_rx_buf *rx_buf;
+
+ /* get the Rx desc from Rx queue based on 'next_to_clean' */
+ rx_desc = IDPF_RX_DESC(rx_q, ntc);
+
+ /* status_error_ptype_len will always be zero for unused
+ * descriptors because it's cleared in cleanup and overlaps
+ * with hdr_addr, which is always zero because packet split
+ * isn't used. If the hardware wrote DD then the length will
+ * be non-zero.
+ */
+#define IDPF_RXD_DD VIRTCHNL2_RX_BASE_DESC_STATUS_DD_M
+ if (!idpf_rx_singleq_test_staterr(rx_desc,
+ IDPF_RXD_DD))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc
+ */
+ dma_rmb();
+
+ idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
+
+ rx_buf = &rx_q->rx_buf.buf[ntc];
+ if (!fields.size) {
+ idpf_rx_put_page(rx_buf);
+ goto skip_data;
+ }
+
+ idpf_rx_sync_for_cpu(rx_buf, fields.size);
+ skb = rx_q->skb;
+ if (skb)
+ idpf_rx_add_frag(rx_buf, skb, fields.size);
+ else
+ skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
+
+skip_data:
+ IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
+
+ cleaned_count++;
+
+ /* skip if this is a non-EOP descriptor */
+ if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc))
+ continue;
+
+#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
+ VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M)
+ if (unlikely(idpf_rx_singleq_test_staterr(rx_desc,
+ IDPF_RXD_ERR_S))) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ continue;
+ }
+
+ /* pad skb if needed (to make valid ethernet frame) */
+ if (eth_skb_pad(skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+
+ /* protocol */
+ idpf_rx_singleq_process_skb_fields(rx_q, skb,
+ rx_desc, fields.rx_ptype);
+
+ /* send completed skb up the stack */
+ napi_gro_receive(&rx_q->q_vector->napi, skb);
+ skb = NULL;
+
+ /* update budget accounting */
+ total_rx_pkts++;
+ }
+
+ rx_q->skb = skb;
+
+ rx_q->next_to_clean = ntc;
+
+ if (cleaned_count)
+ failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
+
+ u64_stats_update_begin(&rx_q->stats_sync);
+ u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts);
+ u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_update_end(&rx_q->stats_sync);
+
+ /* guarantee a trip back through this routine if there was a failure */
+ return failure ? budget : (int)total_rx_pkts;
+}
+
+/**
+ * idpf_rx_singleq_clean_all - Clean all Rx queues
+ * @q_vec: queue vector
+ * @budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns false if clean is not complete else returns true
+ */
+static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
+ int *cleaned)
+{
+ u16 num_rxq = q_vec->num_rxq;
+ bool clean_complete = true;
+ int budget_per_q, i;
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ */
+ budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
+ for (i = 0; i < num_rxq; i++) {
+ struct idpf_queue *rxq = q_vec->rx[i];
+ int pkts_cleaned_per_q;
+
+ pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q);
+
+ /* if we clean as many as budgeted, we must not be done */
+ if (pkts_cleaned_per_q >= budget_per_q)
+ clean_complete = false;
+ *cleaned += pkts_cleaned_per_q;
+ }
+
+ return clean_complete;
+}
+
+/**
+ * idpf_vport_singleq_napi_poll - NAPI handler
+ * @napi: struct from which you get q_vector
+ * @budget: budget provided by stack
+ */
+int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct idpf_q_vector *q_vector =
+ container_of(napi, struct idpf_q_vector, napi);
+ bool clean_complete;
+ int work_done = 0;
+
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0) {
+ idpf_tx_singleq_clean_all(q_vector, budget, &work_done);
+
+ return budget;
+ }
+
+ clean_complete = idpf_rx_singleq_clean_all(q_vector, budget,
+ &work_done);
+ clean_complete &= idpf_tx_singleq_clean_all(q_vector, budget,
+ &work_done);
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
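+ /* NAPI requires a completing poll to report strictly less than
+ * the full budget, so cap work_done at budget - 1 first.
+ */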
+ work_done = min_t(int, work_done, budget - 1);
+
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ idpf_vport_intr_update_itr_ena_irq(q_vector);
+
+ return work_done;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
new file mode 100644
index 000000000000..6fa79898c42c
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -0,0 +1,4289 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+
+/**
+ * idpf_buf_lifo_push - push a buffer pointer onto stack
+ * @stack: pointer to stack struct
+ * @buf: pointer to buf to push
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
+ struct idpf_tx_stash *buf)
+{
+ if (unlikely(stack->top == stack->size))
+ return -ENOSPC;
+
+ stack->bufs[stack->top++] = buf;
+
+ return 0;
+}
+
+/**
+ * idpf_buf_lifo_pop - pop a buffer pointer from stack
+ * @stack: pointer to stack struct
+ **/
+static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
+{
+ if (unlikely(!stack->top))
+ return NULL;
+
+ return stack->bufs[--stack->top];
+}
+
+/**
+ * idpf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: TX queue
+ */
+void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
+
+ adapter->tx_timeout_count++;
+
+ netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n",
+ adapter->tx_timeout_count, txqueue);
+ if (!idpf_is_reset_in_prog(adapter)) {
+ set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq,
+ &adapter->vc_event_task,
+ msecs_to_jiffies(10));
+ }
+}
+
+/**
+ * idpf_tx_buf_rel - Release a Tx buffer
+ * @tx_q: the queue that owns the buffer
+ * @tx_buf: the buffer to free
+ */
+static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
+{
+ if (tx_buf->skb) {
+ if (dma_unmap_len(tx_buf, len))
+ dma_unmap_single(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(tx_buf->skb);
+ } else if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ }
+
+ tx_buf->next_to_watch = NULL;
+ tx_buf->skb = NULL;
+ tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ dma_unmap_len_set(tx_buf, len, 0);
+}
+
+/**
+ * idpf_tx_buf_rel_all - Free any empty Tx buffers
+ * @txq: queue to be cleaned
+ */
+static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
+{
+ u16 i;
+
+ /* Buffers already cleared, nothing to do */
+ if (!txq->tx_buf)
+ return;
+
+ /* Free all the Tx buffer sk_buffs */
+ for (i = 0; i < txq->desc_count; i++)
+ idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
+
+ kfree(txq->tx_buf);
+ txq->tx_buf = NULL;
+
+ if (!txq->buf_stack.bufs)
+ return;
+
+ for (i = 0; i < txq->buf_stack.size; i++)
+ kfree(txq->buf_stack.bufs[i]);
+
+ kfree(txq->buf_stack.bufs);
+ txq->buf_stack.bufs = NULL;
+}
+
+/**
+ * idpf_tx_desc_rel - Free Tx resources per queue
+ * @txq: Tx descriptor ring for a specific queue
+ * @bufq: buffer q or completion q
+ *
+ * Free all transmit software resources
+ */
+static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq)
+{
+ if (bufq)
+ idpf_tx_buf_rel_all(txq);
+
+ if (!txq->desc_ring)
+ return;
+
+ dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
+ txq->desc_ring = NULL;
+ txq->next_to_alloc = 0;
+ txq->next_to_use = 0;
+ txq->next_to_clean = 0;
+}
+
+/**
+ * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
+ * @vport: virtual port structure
+ *
+ * Free all transmit software resources
+ */
+static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
+{
+ int i, j;
+
+ if (!vport->txq_grps)
+ return;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ for (j = 0; j < txq_grp->num_txq; j++)
+ idpf_tx_desc_rel(txq_grp->txqs[j], true);
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ idpf_tx_desc_rel(txq_grp->complq, false);
+ }
+}
+
+/**
+ * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
+ * @tx_q: queue for which the buffers are allocated
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
+{
+ int buf_size;
+ int i;
+
+ /* Allocate bookkeeping buffers only. Buffers to be supplied to HW
+ * are allocated by the kernel network stack and received as part of
+ * the skb.
+ */
+ buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
+ tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!tx_q->tx_buf)
+ return -ENOMEM;
+
+ /* Initialize tx_bufs with invalid completion tags */
+ for (i = 0; i < tx_q->desc_count; i++)
+ tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+
+ /* Initialize tx buf stack for out-of-order completions if
+ * flow scheduling offload is enabled
+ */
+ tx_q->buf_stack.bufs =
+ kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *),
+ GFP_KERNEL);
+ if (!tx_q->buf_stack.bufs)
+ return -ENOMEM;
+
+ tx_q->buf_stack.size = tx_q->desc_count;
+ tx_q->buf_stack.top = tx_q->desc_count;
+
+ for (i = 0; i < tx_q->desc_count; i++) {
+ tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]),
+ GFP_KERNEL);
+ if (!tx_q->buf_stack.bufs[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_tx_desc_alloc - Allocate the Tx descriptors
+ * @tx_q: the tx ring to set up
+ * @bufq: buffer or completion queue
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
+{
+ struct device *dev = tx_q->dev;
+ u32 desc_sz;
+ int err;
+
+ if (bufq) {
+ err = idpf_tx_buf_alloc_all(tx_q);
+ if (err)
+ goto err_alloc;
+
+ desc_sz = sizeof(struct idpf_base_tx_desc);
+ } else {
+ desc_sz = sizeof(struct idpf_splitq_tx_compl_desc);
+ }
+
+ tx_q->size = tx_q->desc_count * desc_sz;
+
+ /* Allocate descriptors, rounding the size up to the nearest 4K */
+ tx_q->size = ALIGN(tx_q->size, 4096);
+ tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
+ GFP_KERNEL);
+ if (!tx_q->desc_ring) {
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_q->size);
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ tx_q->next_to_alloc = 0;
+ tx_q->next_to_use = 0;
+ tx_q->next_to_clean = 0;
+ set_bit(__IDPF_Q_GEN_CHK, tx_q->flags);
+
+ return 0;
+
+err_alloc:
+ idpf_tx_desc_rel(tx_q, bufq);
+
+ return err;
+}
+
+/**
+ * idpf_tx_desc_alloc_all - allocate all queues Tx resources
+ * @vport: virtual port private structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
+{
+ struct device *dev = &vport->adapter->pdev->dev;
+ int err = 0;
+ int i, j;
+
+ /* Set up buffer queues. In the single queue model, buffer queues
+ * and completion queues are the same.
+ */
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
+ struct idpf_queue *txq = vport->txq_grps[i].txqs[j];
+ u8 gen_bits = 0;
+ u16 bufidx_mask;
+
+ err = idpf_tx_desc_alloc(txq, true);
+ if (err) {
+ dev_err(dev, "Allocation for Tx Queue %u failed\n",
+ i);
+ goto err_out;
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ continue;
+
+ txq->compl_tag_cur_gen = 0;
+
+ /* Determine the number of bits in the bufid
+ * mask and add one to get the start of the
+ * generation bits
+ */
+ bufidx_mask = txq->desc_count - 1;
+ while (bufidx_mask >> 1) {
+ txq->compl_tag_gen_s++;
+ bufidx_mask = bufidx_mask >> 1;
+ }
+ txq->compl_tag_gen_s++;
+
+ gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
+ txq->compl_tag_gen_s;
+ txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
+
+ /* Set bufid mask based on location of first
+ * gen bit; it cannot simply be the descriptor
+ * ring size-1 since we can have size values
+ * where not all of those bits are set.
+ */
+ txq->compl_tag_bufid_m =
+ GETMAXVAL(txq->compl_tag_gen_s);
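+
+ /* Worked example (illustrative only): a 512-entry ring gives
+ * a 9-bit bufidx_mask, so compl_tag_gen_s ends up 9 and
+ * compl_tag_bufid_m is 0x1ff; assuming a 16-bit completion
+ * tag width, 7 generation bits remain and gen max is 127.
+ */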
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ continue;
+
+ /* Setup completion queues */
+ err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false);
+ if (err) {
+ dev_err(dev, "Allocation for Tx Completion Queue %u failed\n",
+ i);
+ goto err_out;
+ }
+ }
+
+err_out:
+ if (err)
+ idpf_tx_desc_rel_all(vport);
+
+ return err;
+}
+
+/**
+ * idpf_rx_page_rel - Release an rx buffer page
+ * @rxq: the queue that owns the buffer
+ * @rx_buf: the buffer to free
+ */
+static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf)
+{
+ if (unlikely(!rx_buf->page))
+ return;
+
+ page_pool_put_full_page(rxq->pp, rx_buf->page, false);
+
+ rx_buf->page = NULL;
+ rx_buf->page_offset = 0;
+}
+
+/**
+ * idpf_rx_hdr_buf_rel_all - Release header buffer memory
+ * @rxq: queue to use
+ */
+static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq)
+{
+ struct idpf_adapter *adapter = rxq->vport->adapter;
+
+ dma_free_coherent(&adapter->pdev->dev,
+ rxq->desc_count * IDPF_HDR_BUF_SIZE,
+ rxq->rx_buf.hdr_buf_va,
+ rxq->rx_buf.hdr_buf_pa);
+ rxq->rx_buf.hdr_buf_va = NULL;
+}
+
+/**
+ * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue
+ * @rxq: queue to be cleaned
+ */
+static void idpf_rx_buf_rel_all(struct idpf_queue *rxq)
+{
+ u16 i;
+
+ /* queue already cleared, nothing to do */
+ if (!rxq->rx_buf.buf)
+ return;
+
+ /* Free all the bufs allocated and given to hw on Rx queue */
+ for (i = 0; i < rxq->desc_count; i++)
+ idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]);
+
+ if (rxq->rx_hsplit_en)
+ idpf_rx_hdr_buf_rel_all(rxq);
+
+ page_pool_destroy(rxq->pp);
+ rxq->pp = NULL;
+
+ kfree(rxq->rx_buf.buf);
+ rxq->rx_buf.buf = NULL;
+}
+
+/**
+ * idpf_rx_desc_rel - Free the resources of a specific Rx queue
+ * @rxq: queue to clean the resources from
+ * @bufq: buffer q or completion q
+ * @q_model: single or split q model
+ *
+ * Free the resources of a specific rx queue
+ */
+static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
+{
+ if (!rxq)
+ return;
+
+ if (!bufq && idpf_is_queue_model_split(q_model) && rxq->skb) {
+ dev_kfree_skb_any(rxq->skb);
+ rxq->skb = NULL;
+ }
+
+ if (bufq || !idpf_is_queue_model_split(q_model))
+ idpf_rx_buf_rel_all(rxq);
+
+ rxq->next_to_alloc = 0;
+ rxq->next_to_clean = 0;
+ rxq->next_to_use = 0;
+ if (!rxq->desc_ring)
+ return;
+
+ dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma);
+ rxq->desc_ring = NULL;
+}
+
+/**
+ * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
+ * @vport: virtual port structure
+ *
+ * Free all Rx queue resources
+ */
+static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
+{
+ struct idpf_rxq_group *rx_qgrp;
+ u16 num_rxq;
+ int i, j;
+
+ if (!vport->rxq_grps)
+ return;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ rx_qgrp = &vport->rxq_grps[i];
+
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
+ idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j],
+ false, vport->rxq_model);
+ continue;
+ }
+
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ for (j = 0; j < num_rxq; j++)
+ idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
+ false, vport->rxq_model);
+
+ if (!rx_qgrp->splitq.bufq_sets)
+ continue;
+
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_bufq_set *bufq_set =
+ &rx_qgrp->splitq.bufq_sets[j];
+
+ idpf_rx_desc_rel(&bufq_set->bufq, true,
+ vport->rxq_model);
+ }
+ }
+}
+
+/**
+ * idpf_rx_buf_hw_update - Store the new tail and head values
+ * @rxq: queue to bump
+ * @val: new head index
+ */
+void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val)
+{
+ rxq->next_to_use = val;
+
+ if (unlikely(!rxq->tail))
+ return;
+
+ /* writel has an implicit memory barrier */
+ writel(val, rxq->tail);
+}
+
+/**
+ * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
+ * @rxq: ring to use
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
+{
+ struct idpf_adapter *adapter = rxq->vport->adapter;
+
+ rxq->rx_buf.hdr_buf_va =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ IDPF_HDR_BUF_SIZE * rxq->desc_count,
+ &rxq->rx_buf.hdr_buf_pa,
+ GFP_KERNEL);
+ if (!rxq->rx_buf.hdr_buf_va)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * idpf_rx_post_buf_refill - Post buffer id to refill queue
+ * @refillq: refill queue to post to
+ * @buf_id: buffer id to post
+ */
+static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
+{
+ u16 nta = refillq->next_to_alloc;
+
+ /* store the buffer ID and the SW maintained GEN bit to the refillq */
+ refillq->ring[nta] =
+ ((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) |
+ (!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) <<
+ IDPF_RX_BI_GEN_S);
+
+ if (unlikely(++nta == refillq->desc_count)) {
+ nta = 0;
+ change_bit(__IDPF_Q_GEN_CHK, refillq->flags);
+ }
+ refillq->next_to_alloc = nta;
+}
+
+/**
+ * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
+ * @bufq: buffer queue to post to
+ * @buf_id: buffer id to post
+ *
+ * Returns false if buffer could not be allocated, true otherwise.
+ */
+static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
+{
+ struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
+ u16 nta = bufq->next_to_alloc;
+ struct idpf_rx_buf *buf;
+ dma_addr_t addr;
+
+ splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
+ buf = &bufq->rx_buf.buf[buf_id];
+
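+ /* All header buffers live in one coherent block; each buf_id
+ * selects a fixed IDPF_HDR_BUF_SIZE slice of it.
+ */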
+ if (bufq->rx_hsplit_en) {
+ splitq_rx_desc->hdr_addr =
+ cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
+ (u32)buf_id * IDPF_HDR_BUF_SIZE);
+ }
+
+ addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
+ if (unlikely(addr == DMA_MAPPING_ERROR))
+ return false;
+
+ splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
+ splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
+
+ nta++;
+ if (unlikely(nta == bufq->desc_count))
+ nta = 0;
+ bufq->next_to_alloc = nta;
+
+ return true;
+}
+
+/**
+ * idpf_rx_post_init_bufs - Post initial buffers to bufq
+ * @bufq: buffer queue to post working set to
+ * @working_set: number of buffers to put in working set
+ *
+ * Returns true if @working_set bufs were posted successfully, false otherwise.
+ */
+static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
+{
+ int i;
+
+ for (i = 0; i < working_set; i++) {
+ if (!idpf_rx_post_buf_desc(bufq, i))
+ return false;
+ }
+
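+ /* The tail is only bumped in whole strides; rx_buf_stride is a
+ * power of two, so masking the low bits rounds next_to_alloc
+ * down to a stride boundary.
+ */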
+ idpf_rx_buf_hw_update(bufq,
+ bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1));
+
+ return true;
+}
+
+/**
+ * idpf_rx_create_page_pool - Create a page pool
+ * @rxbufq: RX queue to create page pool for
+ *
+ * Returns &page_pool on success, casted -errno on failure
+ */
+static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
+{
+ struct page_pool_params pp = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = rxbufq->desc_count,
+ .nid = NUMA_NO_NODE,
+ .dev = rxbufq->vport->netdev->dev.parent,
+ .max_len = PAGE_SIZE,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ };
+
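+ /* 2K buffers can share a page through the page-frag API; larger
+ * buffers consume a full page each.
+ */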
+ if (rxbufq->rx_buf_size == IDPF_RX_BUF_2048)
+ pp.flags |= PP_FLAG_PAGE_FRAG;
+
+ return page_pool_create(&pp);
+}
+
+/**
+ * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
+ * @rxbufq: queue for which the buffers are allocated; equivalent to
+ * rxq when operating in singleq mode
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq)
+{
+ int err = 0;
+
+ /* Allocate bookkeeping buffers */
+ rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count,
+ sizeof(struct idpf_rx_buf), GFP_KERNEL);
+ if (!rxbufq->rx_buf.buf) {
+ err = -ENOMEM;
+ goto rx_buf_alloc_all_out;
+ }
+
+ if (rxbufq->rx_hsplit_en) {
+ err = idpf_rx_hdr_buf_alloc_all(rxbufq);
+ if (err)
+ goto rx_buf_alloc_all_out;
+ }
+
+ /* Allocate buffers to be given to HW. */
+ if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) {
+ int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq);
+
+ if (!idpf_rx_post_init_bufs(rxbufq, working_set))
+ err = -ENOMEM;
+ } else {
+ if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq,
+ rxbufq->desc_count - 1))
+ err = -ENOMEM;
+ }
+
+rx_buf_alloc_all_out:
+ if (err)
+ idpf_rx_buf_rel_all(rxbufq);
+
+ return err;
+}
+
+/**
+ * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
+ * @rxbufq: RX queue to create page pool for
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
+{
+ struct page_pool *pool;
+
+ pool = idpf_rx_create_page_pool(rxbufq);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rxbufq->pp = pool;
+
+ return idpf_rx_buf_alloc_all(rxbufq);
+}
+
+/**
+ * idpf_rx_bufs_init_all - Initialize all RX bufs
+ * @vport: virtual port struct
+ *
+ * Returns 0 on success, negative on failure
+ */
+int idpf_rx_bufs_init_all(struct idpf_vport *vport)
+{
+ struct idpf_rxq_group *rx_qgrp;
+ struct idpf_queue *q;
+ int i, j, err;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ rx_qgrp = &vport->rxq_grps[i];
+
+ /* Allocate bufs for the rxq itself in singleq */
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ int num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ q = rx_qgrp->singleq.rxqs[j];
+ err = idpf_rx_bufs_init(q);
+ if (err)
+ return err;
+ }
+
+ continue;
+ }
+
+ /* Otherwise, allocate bufs for the buffer queues */
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ err = idpf_rx_bufs_init(q);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_rx_desc_alloc - Allocate queue Rx resources
+ * @rxq: Rx queue for which the resources are setup
+ * @bufq: buffer or completion queue
+ * @q_model: single or split queue model
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
+{
+ struct device *dev = rxq->dev;
+
+ if (bufq)
+ rxq->size = rxq->desc_count *
+ sizeof(struct virtchnl2_splitq_rx_buf_desc);
+ else
+ rxq->size = rxq->desc_count *
+ sizeof(union virtchnl2_rx_desc);
+
+ /* Allocate descriptors, rounding the size up to the nearest 4K */
+ rxq->size = ALIGN(rxq->size, 4096);
+ rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
+ &rxq->dma, GFP_KERNEL);
+ if (!rxq->desc_ring) {
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rxq->size);
+ return -ENOMEM;
+ }
+
+ rxq->next_to_alloc = 0;
+ rxq->next_to_clean = 0;
+ rxq->next_to_use = 0;
+ set_bit(__IDPF_Q_GEN_CHK, rxq->flags);
+
+ return 0;
+}
+
+/**
+ * idpf_rx_desc_alloc_all - allocate all RX queues resources
+ * @vport: virtual port structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
+{
+ struct device *dev = &vport->adapter->pdev->dev;
+ struct idpf_rxq_group *rx_qgrp;
+ struct idpf_queue *q;
+ int i, j, err;
+ u16 num_rxq;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ rx_qgrp = &vport->rxq_grps[i];
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+ err = idpf_rx_desc_alloc(q, false, vport->rxq_model);
+ if (err) {
+ dev_err(dev, "Memory allocation for Rx Queue %u failed\n",
+ i);
+ goto err_out;
+ }
+ }
+
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ continue;
+
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ err = idpf_rx_desc_alloc(q, true, vport->rxq_model);
+ if (err) {
+ dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n",
+ i);
+ goto err_out;
+ }
+ }
+ }
+
+ return 0;
+
+err_out:
+ idpf_rx_desc_rel_all(vport);
+
+ return err;
+}
+
+/**
+ * idpf_txq_group_rel - Release all resources for txq groups
+ * @vport: vport to release txq groups on
+ */
+static void idpf_txq_group_rel(struct idpf_vport *vport)
+{
+ int i, j;
+
+ if (!vport->txq_grps)
+ return;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ for (j = 0; j < txq_grp->num_txq; j++) {
+ kfree(txq_grp->txqs[j]);
+ txq_grp->txqs[j] = NULL;
+ }
+ kfree(txq_grp->complq);
+ txq_grp->complq = NULL;
+ }
+ kfree(vport->txq_grps);
+ vport->txq_grps = NULL;
+}
+
+/**
+ * idpf_rxq_sw_queue_rel - Release software queue resources
+ * @rx_qgrp: rx queue group with software queues
+ */
+static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
+{
+ int i, j;
+
+ for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
+ struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
+
+ for (j = 0; j < bufq_set->num_refillqs; j++) {
+ kfree(bufq_set->refillqs[j].ring);
+ bufq_set->refillqs[j].ring = NULL;
+ }
+ kfree(bufq_set->refillqs);
+ bufq_set->refillqs = NULL;
+ }
+}
+
+/**
+ * idpf_rxq_group_rel - Release all resources for rxq groups
+ * @vport: vport to release rxq groups on
+ */
+static void idpf_rxq_group_rel(struct idpf_vport *vport)
+{
+ int i;
+
+ if (!vport->rxq_grps)
+ return;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u16 num_rxq;
+ int j;
+
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ for (j = 0; j < num_rxq; j++) {
+ kfree(rx_qgrp->splitq.rxq_sets[j]);
+ rx_qgrp->splitq.rxq_sets[j] = NULL;
+ }
+
+ idpf_rxq_sw_queue_rel(rx_qgrp);
+ kfree(rx_qgrp->splitq.bufq_sets);
+ rx_qgrp->splitq.bufq_sets = NULL;
+ } else {
+ num_rxq = rx_qgrp->singleq.num_rxq;
+ for (j = 0; j < num_rxq; j++) {
+ kfree(rx_qgrp->singleq.rxqs[j]);
+ rx_qgrp->singleq.rxqs[j] = NULL;
+ }
+ }
+ }
+ kfree(vport->rxq_grps);
+ vport->rxq_grps = NULL;
+}
+
+/**
+ * idpf_vport_queue_grp_rel_all - Release all queue groups
+ * @vport: vport to release queue groups for
+ */
+static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
+{
+ idpf_txq_group_rel(vport);
+ idpf_rxq_group_rel(vport);
+}
+
+/**
+ * idpf_vport_queues_rel - Free memory for all queues
+ * @vport: virtual port
+ *
+ * Free the memory allocated for queues associated to a vport
+ */
+void idpf_vport_queues_rel(struct idpf_vport *vport)
+{
+ idpf_tx_desc_rel_all(vport);
+ idpf_rx_desc_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(vport);
+
+ kfree(vport->txqs);
+ vport->txqs = NULL;
+}
+
+/**
+ * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
+ * @vport: vport to init txqs on
+ *
+ * We get a queue index from skb->queue_mapping and need a fast way to
+ * dereference the queue from the queue groups. This array lets us pull a
+ * txq directly by that index.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
+{
+ int i, j, k = 0;
+
+ vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *),
+ GFP_KERNEL);
+
+ if (!vport->txqs)
+ return -ENOMEM;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_grp->num_txq; j++, k++) {
+ vport->txqs[k] = tx_grp->txqs[j];
+ vport->txqs[k]->idx = k;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_vport_init_num_qs - Initialize number of queues
+ * @vport: vport to initialize queues
+ * @vport_msg: data to be filled into vport
+ */
+void idpf_vport_init_num_qs(struct idpf_vport *vport,
+ struct virtchnl2_create_vport *vport_msg)
+{
+ struct idpf_vport_user_config_data *config_data;
+ u16 idx = vport->idx;
+
+ config_data = &vport->adapter->vport_config[idx]->user_config;
+ vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
+ vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
+ /* The numbers of txqs and rxqs in config data will be zeros only in
+ * the driver load path, and we don't update them thereafter.
+ */
+ if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
+ config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
+ config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
+ }
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+
+ /* Adjust number of buffer queues per Rx queue group. */
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ vport->num_bufqs_per_qgrp = 0;
+ vport->bufq_size[0] = IDPF_RX_BUF_2048;
+
+ return;
+ }
+
+ vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ /* Bufq[0] default buffer size is 4K
+ * Bufq[1] default buffer size is 2K
+ */
+ vport->bufq_size[0] = IDPF_RX_BUF_4096;
+ vport->bufq_size[1] = IDPF_RX_BUF_2048;
+}
+
+/**
+ * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
+ * @vport: vport to calculate descriptor counts for
+ */
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
+{
+ struct idpf_vport_user_config_data *config_data;
+ int num_bufqs = vport->num_bufqs_per_qgrp;
+ u32 num_req_txq_desc, num_req_rxq_desc;
+ u16 idx = vport->idx;
+ int i;
+
+ config_data = &vport->adapter->vport_config[idx]->user_config;
+ num_req_txq_desc = config_data->num_req_txq_desc;
+ num_req_rxq_desc = config_data->num_req_rxq_desc;
+
+ vport->complq_desc_count = 0;
+ if (num_req_txq_desc) {
+ vport->txq_desc_count = num_req_txq_desc;
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ vport->complq_desc_count = num_req_txq_desc;
+ if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
+ vport->complq_desc_count =
+ IDPF_MIN_TXQ_COMPLQ_DESC;
+ }
+ } else {
+ vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
+ if (idpf_is_queue_model_split(vport->txq_model))
+ vport->complq_desc_count =
+ IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
+ }
+
+ if (num_req_rxq_desc)
+ vport->rxq_desc_count = num_req_rxq_desc;
+ else
+ vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
+
+ for (i = 0; i < num_bufqs; i++) {
+ if (!vport->bufq_desc_count[i])
+ vport->bufq_desc_count[i] =
+ IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
+ num_bufqs);
+ }
+}
+
+/**
+ * idpf_vport_calc_total_qs - Calculate total number of queues
+ * @adapter: private data struct
+ * @vport_idx: vport idx to retrieve vport pointer
+ * @vport_msg: message to fill with data
+ * @max_q: vport max queue info
+ *
+ * Return 0 on success, error value on failure.
+ */
+int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_vport_max_q *max_q)
+{
+ int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
+ int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
+ u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
+ struct idpf_vport_config *vport_config;
+ u16 num_txq_grps, num_rxq_grps;
+ u32 num_qs;
+
+ vport_config = adapter->vport_config[vport_idx];
+ if (vport_config) {
+ num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
+ num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
+ } else {
+ int num_cpus;
+
+ /* Restrict the number of queues to the CPUs online as a default
+ * configuration to give the best performance. The user can always
+ * override this up to the maximum number of queues via ethtool.
+ */
+ num_cpus = num_online_cpus();
+
+ dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
+ dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
+ dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
+ dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
+ }
+
+ if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
+ num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
+ vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
+ IDPF_COMPLQ_PER_GROUP);
+ vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
+ IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
+ } else {
+ num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
+ num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
+ dflt_singleq_txqs);
+ vport_msg->num_tx_q = cpu_to_le16(num_qs);
+ vport_msg->num_tx_complq = 0;
+ }
+ if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
+ num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
+ vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
+ IDPF_MAX_BUFQS_PER_RXQ_GRP);
+ vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
+ IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
+ } else {
+ num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
+ num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
+ dflt_singleq_rxqs);
+ vport_msg->num_rx_q = cpu_to_le16(num_qs);
+ vport_msg->num_rx_bufq = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_vport_calc_num_q_groups - Calculate number of queue groups
+ * @vport: vport to calculate q groups for
+ */
+void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
+{
+ if (idpf_is_queue_model_split(vport->txq_model))
+ vport->num_txq_grp = vport->num_txq;
+ else
+ vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ vport->num_rxq_grp = vport->num_rxq;
+ else
+ vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
+}
+
+/**
+ * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
+ * @vport: vport to calculate queues for
+ * @num_txq: return parameter for number of TX queues
+ * @num_rxq: return parameter for number of RX queues
+ */
+static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
+ u16 *num_txq, u16 *num_rxq)
+{
+ if (idpf_is_queue_model_split(vport->txq_model))
+ *num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
+ else
+ *num_txq = vport->num_txq;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ *num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
+ else
+ *num_rxq = vport->num_rxq;
+}
+
+/**
+ * idpf_rxq_set_descids - set the descids supported by this queue
+ * @vport: virtual port data structure
+ * @q: rx queue for which descids are set
+ *
+ */
+static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
+{
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ } else {
+ if (vport->base_rxd)
+ q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
+ else
+ q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+ }
+}
+
+/**
+ * idpf_txq_group_alloc - Allocate all txq group resources
+ * @vport: vport to allocate txq groups for
+ * @num_txq: number of txqs to allocate for each group
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
+{
+ int err, i;
+
+ vport->txq_grps = kcalloc(vport->num_txq_grp,
+ sizeof(*vport->txq_grps), GFP_KERNEL);
+ if (!vport->txq_grps)
+ return -ENOMEM;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ struct idpf_adapter *adapter = vport->adapter;
+ int j;
+
+ tx_qgrp->vport = vport;
+ tx_qgrp->num_txq = num_txq;
+
+ for (j = 0; j < tx_qgrp->num_txq; j++) {
+ tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
+ GFP_KERNEL);
+ if (!tx_qgrp->txqs[j]) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ }
+
+ for (j = 0; j < tx_qgrp->num_txq; j++) {
+ struct idpf_queue *q = tx_qgrp->txqs[j];
+
+ q->dev = &adapter->pdev->dev;
+ q->desc_count = vport->txq_desc_count;
+ q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
+ q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
+ q->vport = vport;
+ q->txq_grp = tx_qgrp;
+ hash_init(q->sched_buf_hash);
+
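+ /* The QSCHED capability advertises queue-based scheduling;
+ * in its absence the queue falls back to flow-based
+ * scheduling, hence the inverted test.
+ */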
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_SPLITQ_QSCHED))
+ set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ continue;
+
+ tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
+ sizeof(*tx_qgrp->complq),
+ GFP_KERNEL);
+ if (!tx_qgrp->complq) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ tx_qgrp->complq->dev = &adapter->pdev->dev;
+ tx_qgrp->complq->desc_count = vport->complq_desc_count;
+ tx_qgrp->complq->vport = vport;
+ tx_qgrp->complq->txq_grp = tx_qgrp;
+ }
+
+ return 0;
+
+err_alloc:
+ idpf_txq_group_rel(vport);
+
+ return err;
+}
+
+/**
+ * idpf_rxq_group_alloc - Allocate all rxq group resources
+ * @vport: vport to allocate rxq groups for
+ * @num_rxq: number of rxqs to allocate for each group
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_queue *q;
+ int i, k, err = 0;
+
+ vport->rxq_grps = kcalloc(vport->num_rxq_grp,
+ sizeof(struct idpf_rxq_group), GFP_KERNEL);
+ if (!vport->rxq_grps)
+ return -ENOMEM;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ int j;
+
+ rx_qgrp->vport = vport;
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ rx_qgrp->singleq.num_rxq = num_rxq;
+ for (j = 0; j < num_rxq; j++) {
+ rx_qgrp->singleq.rxqs[j] =
+ kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
+ GFP_KERNEL);
+ if (!rx_qgrp->singleq.rxqs[j]) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ }
+ goto skip_splitq_rx_init;
+ }
+ rx_qgrp->splitq.num_rxq_sets = num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ rx_qgrp->splitq.rxq_sets[j] =
+ kzalloc(sizeof(struct idpf_rxq_set),
+ GFP_KERNEL);
+ if (!rx_qgrp->splitq.rxq_sets[j]) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ }
+
+ rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
+ sizeof(struct idpf_bufq_set),
+ GFP_KERNEL);
+ if (!rx_qgrp->splitq.bufq_sets) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_bufq_set *bufq_set =
+ &rx_qgrp->splitq.bufq_sets[j];
+ int swq_size = sizeof(struct idpf_sw_queue);
+
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ q->dev = &adapter->pdev->dev;
+ q->desc_count = vport->bufq_desc_count[j];
+ q->vport = vport;
+ q->rxq_grp = rx_qgrp;
+ q->idx = j;
+ q->rx_buf_size = vport->bufq_size[j];
+ q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
+ q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
+ if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
+ IDPF_CAP_HSPLIT) &&
+ idpf_is_queue_model_split(vport->rxq_model)) {
+ q->rx_hsplit_en = true;
+ q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
+ }
+
+ bufq_set->num_refillqs = num_rxq;
+ bufq_set->refillqs = kcalloc(num_rxq, swq_size,
+ GFP_KERNEL);
+ if (!bufq_set->refillqs) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ for (k = 0; k < bufq_set->num_refillqs; k++) {
+ struct idpf_sw_queue *refillq =
+ &bufq_set->refillqs[k];
+
+ refillq->dev = &vport->adapter->pdev->dev;
+ refillq->desc_count =
+ vport->bufq_desc_count[j];
+ set_bit(__IDPF_Q_GEN_CHK, refillq->flags);
+ set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ refillq->ring = kcalloc(refillq->desc_count,
+ sizeof(u16),
+ GFP_KERNEL);
+ if (!refillq->ring) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ }
+ }
+
+skip_splitq_rx_init:
+ for (j = 0; j < num_rxq; j++) {
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ q = rx_qgrp->singleq.rxqs[j];
+ goto setup_rxq;
+ }
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ rx_qgrp->splitq.rxq_sets[j]->refillq0 =
+ &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
+ if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
+ rx_qgrp->splitq.rxq_sets[j]->refillq1 =
+ &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
+
+ if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
+ IDPF_CAP_HSPLIT) &&
+ idpf_is_queue_model_split(vport->rxq_model)) {
+ q->rx_hsplit_en = true;
+ q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
+ }
+
+setup_rxq:
+ q->dev = &adapter->pdev->dev;
+ q->desc_count = vport->rxq_desc_count;
+ q->vport = vport;
+ q->rxq_grp = rx_qgrp;
+ q->idx = (i * num_rxq) + j;
+ /* In splitq mode, RXQ buffer size should be
+ * set to that of the first buffer queue
+ * associated with this RXQ
+ */
+ q->rx_buf_size = vport->bufq_size[0];
+ q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
+ q->rx_max_pkt_size = vport->netdev->mtu +
+ IDPF_PACKET_HDR_PAD;
+ idpf_rxq_set_descids(vport, q);
+ }
+ }
+
+err_alloc:
+ if (err)
+ idpf_rxq_group_rel(vport);
+
+ return err;
+}
+
+/**
+ * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
+ * @vport: vport with qgrps to allocate
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
+{
+ u16 num_txq, num_rxq;
+ int err;
+
+ idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
+
+ err = idpf_txq_group_alloc(vport, num_txq);
+ if (err)
+ goto err_out;
+
+ err = idpf_rxq_group_alloc(vport, num_rxq);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ idpf_vport_queue_grp_rel_all(vport);
+
+ return err;
+}
+
+/**
+ * idpf_vport_queues_alloc - Allocate memory for all queues
+ * @vport: virtual port
+ *
+ * Allocate memory for queues associated with a vport. Returns 0 on success,
+ * negative on failure.
+ */
+int idpf_vport_queues_alloc(struct idpf_vport *vport)
+{
+ int err;
+
+ err = idpf_vport_queue_grp_alloc_all(vport);
+ if (err)
+ goto err_out;
+
+ err = idpf_tx_desc_alloc_all(vport);
+ if (err)
+ goto err_out;
+
+ err = idpf_rx_desc_alloc_all(vport);
+ if (err)
+ goto err_out;
+
+ err = idpf_vport_init_fast_path_txqs(vport);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ idpf_vport_queues_rel(vport);
+
+ return err;
+}
+
+/**
+ * idpf_tx_handle_sw_marker - Handle queue marker packet
+ * @tx_q: tx queue to handle software marker
+ */
+static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
+{
+ struct idpf_vport *vport = tx_q->vport;
+ int i;
+
+ clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
+ /* Hardware must write marker packets to all queues associated with
+ * completion queues, so check whether all queues have received their
+ * marker packets.
+ */
+ for (i = 0; i < vport->num_txq; i++)
+ /* If we're still waiting on any other TXQ marker completions,
+ * just return now since we cannot wake up the marker_wq yet.
+ */
+ if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags))
+ return;
+
+ /* Drain complete */
+ set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
+ wake_up(&vport->sw_marker_wq);
+}
+
+/**
+ * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
+ * packet
+ * @tx_q: tx queue to clean buffer from
+ * @tx_buf: buffer to be cleaned
+ * @cleaned: pointer to stats struct to track cleaned packets/bytes
+ * @napi_budget: Used to determine if we are in netpoll
+ */
+static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
+ struct idpf_tx_buf *tx_buf,
+ struct idpf_cleaned_stats *cleaned,
+ int napi_budget)
+{
+ napi_consume_skb(tx_buf->skb, napi_budget);
+
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_single(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+
+ /* clear tx_buf data */
+ tx_buf->skb = NULL;
+
+ cleaned->bytes += tx_buf->bytecount;
+ cleaned->packets += tx_buf->gso_segs;
+}
+
+/**
+ * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
+ * out of order completions
+ * @txq: queue to clean
+ * @compl_tag: completion tag of packet to clean (from completion descriptor)
+ * @cleaned: pointer to stats struct to track cleaned packets/bytes
+ * @budget: Used to determine if we are in netpoll
+ */
+static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
+ struct idpf_cleaned_stats *cleaned,
+ int budget)
+{
+ struct idpf_tx_stash *stash;
+ struct hlist_node *tmp_buf;
+
+ /* Buffer completion */
+ hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf,
+ hlist, compl_tag) {
+ if (unlikely(stash->buf.compl_tag != (int)compl_tag))
+ continue;
+
+ if (stash->buf.skb) {
+ idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
+ budget);
+ } else if (dma_unmap_len(&stash->buf, len)) {
+ dma_unmap_page(txq->dev,
+ dma_unmap_addr(&stash->buf, dma),
+ dma_unmap_len(&stash->buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(&stash->buf, len, 0);
+ }
+
+ /* Push shadow buf back onto stack */
+ idpf_buf_lifo_push(&txq->buf_stack, stash);
+
+ hash_del(&stash->hlist);
+ }
+}
+
+/**
+ * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a
+ * later time (only relevant for flow scheduling mode)
+ * @txq: Tx queue to clean
+ * @tx_buf: buffer to store
+ */
+static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
+ struct idpf_tx_buf *tx_buf)
+{
+ struct idpf_tx_stash *stash;
+
+ if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
+ !dma_unmap_len(tx_buf, len)))
+ return 0;
+
+ stash = idpf_buf_lifo_pop(&txq->buf_stack);
+ if (unlikely(!stash)) {
+ net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
+ txq->vport->netdev->name);
+
+ return -ENOMEM;
+ }
+
+ /* Store buffer params in shadow buffer */
+ stash->buf.skb = tx_buf->skb;
+ stash->buf.bytecount = tx_buf->bytecount;
+ stash->buf.gso_segs = tx_buf->gso_segs;
+ dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
+ dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
+ stash->buf.compl_tag = tx_buf->compl_tag;
+
+ /* Add buffer to buf_hash table to be freed later */
+ hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag);
+
+ memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
+
+ /* Reinitialize buf_id portion of tag */
+ tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+
+ return 0;
+}
+
+#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
+do { \
+ (ntc)++; \
+ if (unlikely(!(ntc))) { \
+ ntc -= (txq)->desc_count; \
+ buf = (txq)->tx_buf; \
+ desc = IDPF_FLEX_TX_DESC(txq, 0); \
+ } else { \
+ (buf)++; \
+ (desc)++; \
+ } \
+} while (0)
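+
+/* A worked example of the wrap handling above (values are illustrative
+ * only): with desc_count = 512 and next_to_clean = 500, the caller first
+ * offsets ntc to 500 - 512 = -12. Twelve bumps later ntc reaches 0, which
+ * signals a wrap, so ntc is reset to -512 and buf/desc are pointed back at
+ * index 0 of their rings. Keeping ntc negative until the final
+ * "ntc += desc_count" makes the wrap test a cheap !ntc check instead of a
+ * compare against desc_count on every bump.
+ */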
+
+/**
+ * idpf_tx_splitq_clean - Reclaim resources from buffer queue
+ * @tx_q: Tx queue to clean
+ * @end: queue index until which it should be cleaned
+ * @napi_budget: Used to determine if we are in netpoll
+ * @cleaned: pointer to stats struct to track cleaned packets/bytes
+ * @descs_only: true if queue is using flow-based scheduling and should
+ * not clean buffers at this time
+ *
+ * Cleans the queue descriptor ring. If the queue is using queue-based
+ * scheduling, the buffers will be cleaned as well. If the queue is using
+ * flow-based scheduling, only the descriptors are cleaned at this time.
+ * Separate packet completion events will be reported on the completion queue,
+ * and the buffers will be cleaned separately. The stats are not updated from
+ * this function when using flow-based scheduling.
+ */
+static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
+ int napi_budget,
+ struct idpf_cleaned_stats *cleaned,
+ bool descs_only)
+{
+ union idpf_tx_flex_desc *next_pending_desc = NULL;
+ union idpf_tx_flex_desc *tx_desc;
+ s16 ntc = tx_q->next_to_clean;
+ struct idpf_tx_buf *tx_buf;
+
+ tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
+ next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
+ tx_buf = &tx_q->tx_buf[ntc];
+ ntc -= tx_q->desc_count;
+
+ while (tx_desc != next_pending_desc) {
+ union idpf_tx_flex_desc *eop_desc;
+
+ /* If this entry in the ring was used as a context descriptor,
+ * its corresponding entry in the buffer ring will have an
+ * invalid completion tag since no buffer was used. We can
+ * skip this descriptor since there is no buffer to clean.
+ */
+ if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
+ goto fetch_next_txq_desc;
+
+ eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ if (descs_only) {
+ if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
+ goto tx_splitq_clean_out;
+
+ while (tx_desc != eop_desc) {
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+ tx_desc, tx_buf);
+
+ if (dma_unmap_len(tx_buf, len)) {
+ if (idpf_stash_flow_sch_buffers(tx_q,
+ tx_buf))
+ goto tx_splitq_clean_out;
+ }
+ }
+ } else {
+ idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
+ napi_budget);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+ tx_desc, tx_buf);
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+ }
+
+fetch_next_txq_desc:
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
+ }
+
+tx_splitq_clean_out:
+ ntc += tx_q->desc_count;
+ tx_q->next_to_clean = ntc;
+}
+
+#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \
+do { \
+ (buf)++; \
+ (ntc)++; \
+ if (unlikely((ntc) == (txq)->desc_count)) { \
+ buf = (txq)->tx_buf; \
+ ntc = 0; \
+ } \
+} while (0)
+
+/**
+ * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
+ * @txq: queue to clean
+ * @compl_tag: completion tag of packet to clean (from completion descriptor)
+ * @cleaned: pointer to stats struct to track cleaned packets/bytes
+ * @budget: Used to determine if we are in netpoll
+ *
+ * Cleans all buffers associated with the input completion tag either from the
+ * TX buffer ring or from the hash table if the buffers were previously
+ * stashed. The byte/segment counts for the packet associated with this
+ * completion tag are reported through @cleaned. Returns true if any buffers
+ * were cleaned directly from the ring.
+ */
+static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
+ struct idpf_cleaned_stats *cleaned,
+ int budget)
+{
+ u16 idx = compl_tag & txq->compl_tag_bufid_m;
+ struct idpf_tx_buf *tx_buf = NULL;
+ u16 ntc = txq->next_to_clean;
+ u16 num_descs_cleaned = 0;
+ u16 orig_idx = idx;
+
+ tx_buf = &txq->tx_buf[idx];
+
+ while (tx_buf->compl_tag == (int)compl_tag) {
+ if (tx_buf->skb) {
+ idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
+ } else if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(txq->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+
+ memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
+ tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+
+ num_descs_cleaned++;
+ idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ }
+
+ /* If we didn't clean anything on the ring for this completion, there's
+ * nothing more to do.
+ */
+ if (unlikely(!num_descs_cleaned))
+ return false;
+
+ /* Otherwise, if we did clean a packet on the ring directly, it's safe
+ * to assume that the descriptors starting from the original
+ * next_to_clean up until the previously cleaned packet can be reused.
+ * Therefore, we will go back in the ring and stash any buffers still
+ * in the ring into the hash table to be cleaned later.
+ */
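+ /* A worked example (indices are illustrative): if next_to_clean is 4
+ * but this completion's tag maps to ring index 7, and the loop above
+ * cleaned indices 7-9 (so idx is now 10), then buffers 4-6 belong to
+ * packets whose completions have not yet arrived. They are stashed
+ * below so next_to_clean can safely jump from 4 to 10.
+ */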
+ tx_buf = &txq->tx_buf[ntc];
+ while (tx_buf != &txq->tx_buf[orig_idx]) {
+ idpf_stash_flow_sch_buffers(txq, tx_buf);
+ idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
+ }
+
+ /* Finally, update next_to_clean to reflect the work that was just done
+ * on the ring, if any. If the packet was only cleaned from the hash
+ * table, the ring will not be impacted, therefore we should not touch
+ * next_to_clean. The idx updated by the cleaning loop above is used here.
+ */
+ txq->next_to_clean = idx;
+
+ return true;
+}
+
+/**
+ * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
+ * whether on the buffer ring or in the hash table
+ * @txq: Tx ring to clean
+ * @desc: pointer to completion queue descriptor to extract completion
+ * information from
+ * @cleaned: pointer to stats struct to track cleaned packets/bytes
+ * @budget: Used to determine if we are in netpoll
+ *
+ * The cleaned bytes/packets are reported through @cleaned.
+ */
+static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
+ struct idpf_splitq_tx_compl_desc *desc,
+ struct idpf_cleaned_stats *cleaned,
+ int budget)
+{
+ u16 compl_tag;
+
+ if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) {
+ u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
+
+ return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ }
+
+ compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
+
+ /* If we didn't clean anything on the ring, this packet must be
+ * in the hash table. Go clean it there.
+ */
+ if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
+ idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
+}
+
+/**
+ * idpf_tx_clean_complq - Reclaim resources on completion queue
+ * @complq: Tx ring to clean
+ * @budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ */
+static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
+ int *cleaned)
+{
+ struct idpf_splitq_tx_compl_desc *tx_desc;
+ struct idpf_vport *vport = complq->vport;
+ s16 ntc = complq->next_to_clean;
+ struct idpf_netdev_priv *np;
+ unsigned int complq_budget;
+ bool complq_ok = true;
+ int i;
+
+ complq_budget = vport->compln_clean_budget;
+ tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
+ ntc -= complq->desc_count;
+
+ do {
+ struct idpf_cleaned_stats cleaned_stats = { };
+ struct idpf_queue *tx_q;
+ int rel_tx_qid;
+ u16 hw_head;
+ u8 ctype; /* completion type */
+ u16 gen;
+
+ /* if the descriptor isn't done, no work yet to do */
+ gen = (le16_to_cpu(tx_desc->qid_comptype_gen) &
+ IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
+ if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
+ break;
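+
+ /* Note on the generation scheme (as inferred from the checks in
+ * this loop): HW toggles the gen value it writes each time it wraps
+ * the ring, and SW mirrors that by flipping __IDPF_Q_GEN_CHK on its
+ * own wrap below. A descriptor whose gen bit does not match SW's
+ * expected value was written on a previous lap and is not yet done.
+ */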
+
+ /* Find necessary info of TX queue to clean buffers */
+ rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) &
+ IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
+ if (rel_tx_qid >= complq->txq_grp->num_txq ||
+ !complq->txq_grp->txqs[rel_tx_qid]) {
+ dev_err(&complq->vport->adapter->pdev->dev,
+ "TxQ not found\n");
+ goto fetch_next_desc;
+ }
+ tx_q = complq->txq_grp->txqs[rel_tx_qid];
+
+ /* Determine completion type */
+ ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) &
+ IDPF_TXD_COMPLQ_COMPL_TYPE_M) >>
+ IDPF_TXD_COMPLQ_COMPL_TYPE_S;
+ switch (ctype) {
+ case IDPF_TXD_COMPLT_RE:
+ hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
+
+ idpf_tx_splitq_clean(tx_q, hw_head, budget,
+ &cleaned_stats, true);
+ break;
+ case IDPF_TXD_COMPLT_RS:
+ idpf_tx_handle_rs_completion(tx_q, tx_desc,
+ &cleaned_stats, budget);
+ break;
+ case IDPF_TXD_COMPLT_SW_MARKER:
+ idpf_tx_handle_sw_marker(tx_q);
+ break;
+ default:
+ dev_err(&tx_q->vport->adapter->pdev->dev,
+ "Unknown TX completion type: %d\n",
+ ctype);
+ goto fetch_next_desc;
+ }
+
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets);
+ u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes);
+ tx_q->cleaned_pkts += cleaned_stats.packets;
+ tx_q->cleaned_bytes += cleaned_stats.bytes;
+ complq->num_completions++;
+ u64_stats_update_end(&tx_q->stats_sync);
+
+fetch_next_desc:
+ tx_desc++;
+ ntc++;
+ if (unlikely(!ntc)) {
+ ntc -= complq->desc_count;
+ tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
+ change_bit(__IDPF_Q_GEN_CHK, complq->flags);
+ }
+
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ complq_budget--;
+ } while (likely(complq_budget));
+
+ /* Store the state of the complq to be used later in deciding if a
+ * TXQ can be started again
+ */
+ if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
+ IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
+ complq_ok = false;
+
+ np = netdev_priv(complq->vport->netdev);
+ for (i = 0; i < complq->txq_grp->num_txq; ++i) {
+ struct idpf_queue *tx_q = complq->txq_grp->txqs[i];
+ struct netdev_queue *nq;
+ bool dont_wake;
+
+ /* We didn't clean anything on this queue, move along */
+ if (!tx_q->cleaned_bytes)
+ continue;
+
+ *cleaned += tx_q->cleaned_pkts;
+
+ /* Update BQL */
+ nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+
+ dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
+ np->state != __IDPF_VPORT_UP ||
+ !netif_carrier_ok(tx_q->vport->netdev);
+ /* Check if the TXQ needs to and can be restarted */
+ __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
+ IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
+ dont_wake);
+
+ /* Reset cleaned stats for the next time this queue is
+ * cleaned
+ */
+ tx_q->cleaned_bytes = 0;
+ tx_q->cleaned_pkts = 0;
+ }
+
+ ntc += complq->desc_count;
+ complq->next_to_clean = ntc;
+
+ return !!complq_budget;
+}
+
+/**
+ * idpf_tx_splitq_build_ctb - populate command tag and size for queue
+ * based scheduling descriptors
+ * @desc: descriptor to populate
+ * @params: pointer to tx params struct
+ * @td_cmd: command to be filled in desc
+ * @size: size of buffer
+ */
+void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
+ struct idpf_tx_splitq_params *params,
+ u16 td_cmd, u16 size)
+{
+ desc->q.qw1.cmd_dtype =
+ cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M);
+ desc->q.qw1.cmd_dtype |=
+ cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) &
+ IDPF_FLEX_TXD_QW1_CMD_M);
+ desc->q.qw1.buf_size = cpu_to_le16((u16)size);
+ desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
+}
+
+/**
+ * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
+ * scheduling descriptors
+ * @desc: descriptor to populate
+ * @params: pointer to tx params struct
+ * @td_cmd: command to be filled in desc
+ * @size: size of buffer
+ */
+void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
+ struct idpf_tx_splitq_params *params,
+ u16 td_cmd, u16 size)
+{
+ desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
+ desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
+ desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
+}
+
+/**
+ * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
+ * @tx_q: the queue to be checked
+ * @size: number of descriptors we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ */
+int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
+{
+ struct netdev_queue *nq;
+
+ if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
+ return 0;
+
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+
+ return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
+}
+
+/**
+ * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
+ * @tx_q: the queue to be checked
+ * @descs_needed: number of descriptors required for this packet
+ *
+ * Returns 0 if stop is not needed
+ */
+static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
+ unsigned int descs_needed)
+{
+ if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
+ goto splitq_stop;
+
+ /* If there are too many outstanding completions expected on the
+ * completion queue, stop the TX queue to give the device some time to
+ * catch up
+ */
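+ /* For illustration (the threshold here is an example; the real
+ * value comes from IDPF_TX_COMPLQ_OVERFLOW_THRESH): if a 512-entry
+ * completion queue used half the ring as its threshold, the TX
+ * queue would stop once more than 256 completions were outstanding
+ * across the TX queues sharing this completion queue.
+ */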
+ if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+ IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
+ goto splitq_stop;
+
+ /* Also check for available book keeping buffers; if we are low, stop
+ * the queue to wait for more completions
+ */
+ if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
+ goto splitq_stop;
+
+ return 0;
+
+splitq_stop:
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+ netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx);
+
+ return -EBUSY;
+}
+
+/**
+ * idpf_tx_buf_hw_update - Store the new tail value
+ * @tx_q: queue to bump
+ * @val: new tail index
+ * @xmit_more: more skbs pending
+ *
+ * The naming here is special in that 'hw' signals that this function is about
+ * to do a register write to update our queue status. We know this can only
+ * mean tail here as HW should be owning head for TX.
+ */
+void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+ bool xmit_more)
+{
+ struct netdev_queue *nq;
+
+ nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ tx_q->next_to_use = val;
+
+ idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ /* notify HW of packet */
+ if (netif_xmit_stopped(nq) || !xmit_more)
+ writel(val, tx_q->tail);
+}
+
+/**
+ * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ *
+ * Returns number of data descriptors needed for this skb.
+ */
+unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
+ struct sk_buff *skb)
+{
+ const struct skb_shared_info *shinfo;
+ unsigned int count = 0, i;
+
+ count += !!skb_headlen(skb);
+
+ if (!skb_is_nonlinear(skb))
+ return count;
+
+ shinfo = skb_shinfo(skb);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ unsigned int size;
+
+ size = skb_frag_size(&shinfo->frags[i]);
+
+ /* We only need to use the idpf_size_to_txd_count check if the
+ * fragment is going to span multiple descriptors,
+ * i.e. size >= 16K.
+ */
+ if (size >= SZ_16K)
+ count += idpf_size_to_txd_count(size);
+ else
+ count++;
+ }
+
+ if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
+ if (__skb_linearize(skb))
+ return 0;
+
+ count = idpf_size_to_txd_count(skb->len);
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.tx.linearize);
+ u64_stats_update_end(&txq->stats_sync);
+ }
+
+ return count;
+}
+
+/**
+ * idpf_tx_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
+ struct idpf_tx_buf *first, u16 idx)
+{
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.tx.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ /* clear dma mappings for failed tx_buf map */
+ for (;;) {
+ struct idpf_tx_buf *tx_buf;
+
+ tx_buf = &txq->tx_buf[idx];
+ idpf_tx_buf_rel(txq, tx_buf);
+ if (tx_buf == first)
+ break;
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ if (skb_is_gso(skb)) {
+ union idpf_tx_flex_desc *tx_desc;
+
+ /* If we failed a DMA mapping for a TSO packet, we will have
+ * used one additional descriptor for a context
+ * descriptor. Reset that here.
+ */
+ tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
+ memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ /* Update tail in case netdev_xmit_more was previously true */
+ idpf_tx_buf_hw_update(txq, idx, false);
+}
+
+/**
+ * idpf_tx_splitq_bump_ntu - adjust NTU and generation
+ * @txq: the tx ring to wrap
+ * @ntu: ring index to bump
+ */
+static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
+{
+ ntu++;
+
+ if (ntu == txq->desc_count) {
+ ntu = 0;
+ txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
+ }
+
+ return ntu;
+}
+
+/**
+ * idpf_tx_splitq_map - Build the Tx flex descriptor
+ * @tx_q: queue to send buffer on
+ * @params: pointer to splitq params struct
+ * @first: first buffer info buffer to use
+ *
+ * This function loops over the skb data pointed to by *first
+ * and gets a physical address for each memory location and programs
+ * it and the length into the transmit flex descriptor.
+ */
+static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
+ struct idpf_tx_splitq_params *params,
+ struct idpf_tx_buf *first)
+{
+ union idpf_tx_flex_desc *tx_desc;
+ unsigned int data_len, size;
+ struct idpf_tx_buf *tx_buf;
+ u16 i = tx_q->next_to_use;
+ struct netdev_queue *nq;
+ struct sk_buff *skb;
+ skb_frag_t *frag;
+ u16 td_cmd = 0;
+ dma_addr_t dma;
+
+ skb = first->skb;
+
+ td_cmd = params->offload.td_cmd;
+
+ data_len = skb->data_len;
+ size = skb_headlen(skb);
+
+ tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
+
+ dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_buf = first;
+
+ params->compl_tag =
+ (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
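+
+ /* Illustrative tag layout (the shift width is an example; the real
+ * width comes from the device's completion tag configuration): with
+ * compl_tag_gen_s = 11, generation 2 and next_to_use i = 5 yield
+ * compl_tag = (2 << 11) | 5 = 0x1005. The cleaning path masks with
+ * compl_tag_bufid_m to recover ring index 5 and matches the full
+ * tag against every buffer of the packet.
+ */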
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+
+ if (dma_mapping_error(tx_q->dev, dma))
+ return idpf_tx_dma_map_error(tx_q, skb, first, i);
+
+ tx_buf->compl_tag = params->compl_tag;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ /* buf_addr is in same location for both desc types */
+ tx_desc->q.buf_addr = cpu_to_le64(dma);
+
+ /* The stack can send us fragments that are too large for a
+ * single descriptor i.e. frag size > 16K-1. We will need to
+ * split the fragment across multiple descriptors in this case.
+ * To adhere to HW alignment restrictions, the fragment needs
+ * to be split such that the first chunk ends on a 4K boundary
+ * and all subsequent chunks start on a 4K boundary. We still
+ * want to send as much data as possible though, so our
+ * intermediate descriptor chunk size will be 12K.
+ *
+ * For example, consider a 32K fragment mapped to DMA addr 2600.
+ * ------------------------------------------------------------
+ * | frag_size = 32K |
+ * ------------------------------------------------------------
+ * |2600 |16384 |28672
+ *
+ * 3 descriptors will be used for this fragment. The HW expects
+ * the descriptors to contain the following:
+ * ------------------------------------------------------------
+ * | size = 13784 | size = 12K | size = 6696 |
+ * | dma = 2600 | dma = 16384 | dma = 28672 |
+ * ------------------------------------------------------------
+ *
+ * We need to first adjust the max_data for the first chunk so
+ * that it ends on a 4K boundary. By negating the value of the
+ * DMA address and taking only the low order bits, we're
+ * effectively calculating
+ * 4K - (DMA addr lower order bits) =
+ * bytes to next boundary.
+ *
+ * Add that to our base aligned max_data (12K) and we have
+ * our first chunk size. In the example above,
+ * 13784 = 12K + (4096-2600)
+ *
+ * After guaranteeing the first chunk ends on a 4K boundary, we
+ * will give the intermediate descriptors 12K chunks and
+ * whatever is left to the final descriptor. This ensures that
+ * all descriptors used for the remaining chunks of the
+ * fragment start on a 4K boundary and we use as few
+ * descriptors as possible.
+ */
+ max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
+ while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
+ idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
+ max_data);
+
+ tx_desc++;
+ i++;
+
+ if (i == tx_q->desc_count) {
+ tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ i = 0;
+ tx_q->compl_tag_cur_gen =
+ IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ }
+
+ /* Since this packet has a buffer that is going to span
+ * multiple descriptors, it's going to leave holes in
+ * the TX buffer ring. To ensure these holes do not
+ * cause issues in the cleaning routines, we will clear
+ * them of any stale data and assign them the same
+ * completion tag as the current packet. Then when the
+ * packet is being cleaned, the cleaning routines will
+ * simply pass over these holes and finish cleaning the
+ * rest of the packet.
+ */
+ memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
+ tx_q->tx_buf[i].compl_tag = params->compl_tag;
+
+ /* Adjust the DMA offset and the remaining size of the
+ * fragment. On the first iteration of this loop,
+ * max_data will be >= 12K and <= 16K-1. On any
+ * subsequent iteration of this loop, max_data will
+ * always be 12K.
+ */
+ dma += max_data;
+ size -= max_data;
+
+ /* Reset max_data since remaining chunks will be 12K
+ * at most
+ */
+ max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+
+ /* buf_addr is in same location for both desc types */
+ tx_desc->q.buf_addr = cpu_to_le64(dma);
+ }
+
+ if (!data_len)
+ break;
+
+ idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
+ tx_desc++;
+ i++;
+
+ if (i == tx_q->desc_count) {
+ tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ i = 0;
+ tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_buf = &tx_q->tx_buf[i];
+ }
+
+ /* record SW timestamp if HW timestamp is not available */
+ skb_tx_timestamp(skb);
+
+ /* write last descriptor with RS and EOP bits */
+ td_cmd |= params->eop_cmd;
+ idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
+ i = idpf_tx_splitq_bump_ntu(tx_q, i);
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ tx_q->txq_grp->num_completions_pending++;
+
+ /* record bytecount for BQL */
+ nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ netdev_tx_sent_queue(nq, first->bytecount);
+
+ idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
+}
+
+/**
+ * idpf_tso - computes mss and TSO length to prepare for TSO
+ * @skb: pointer to skb
+ * @off: pointer to struct that holds offload parameters
+ *
+ * Returns error (negative) if TSO was requested but cannot be applied to the
+ * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
+ */
+int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_start;
+ int err;
+
+ if (!shinfo->gso_size)
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else if (ip.v6->version == 6) {
+ ip.v6->payload_len = 0;
+ }
+
+ l4_start = skb_transport_offset(skb);
+
+ /* remove payload length from checksum */
+ paylen = skb->len - l4_start;
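+
+ /* Illustrative arithmetic (lengths are examples): for a 64254-byte
+ * TSO skb with l4_start = 34, paylen = 64220. The stack seeded the
+ * L4 checksum with a pseudo-header that includes this payload
+ * length; csum_replace_by_diff() below backs it out so HW can
+ * insert the correct per-segment length for each segment it
+ * generates.
+ */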
+
+ switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
+ case SKB_GSO_TCPV4:
+ case SKB_GSO_TCPV6:
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
+ break;
+ case SKB_GSO_UDP_L4:
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of segmentation header */
+ off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
+ l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ off->tso_len = skb->len - off->tso_hdr_len;
+ off->mss = shinfo->gso_size;
+ off->tso_segs = shinfo->gso_segs;
+
+ off->tx_flags |= IDPF_TX_FLAGS_TSO;
+
+ return 1;
+}
+
+/**
+ * __idpf_chk_linearize - Check skb is not using too many buffers
+ * @skb: send buffer
+ * @max_bufs: maximum number of buffers
+ *
+ * For TSO we need to count the TSO header and segment payload separately. As
+ * such we need to check cases where we have max_bufs-1 fragments or more as we
+ * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
+ * for the segment payload in the first descriptor, and another max_bufs-1 for
+ * the fragments.
+ */
+static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const skb_frag_t *frag, *stale;
+ int nr_frags, sum;
+
+ /* no need to check if number of frags is less than max_bufs - 1 */
+ nr_frags = shinfo->nr_frags;
+ if (nr_frags < (max_bufs - 1))
+ return false;
+
+ /* We need to walk through the list and validate that each group
+ * of max_bufs-2 fragments totals at least gso_size.
+ */
+ nr_frags -= max_bufs - 2;
+ frag = &shinfo->frags[0];
+
+ /* Initialize sum to the negative of (gso_size - 1). We use
+ * this as the worst case scenario in which the frag ahead of us only
+ * provides one byte, which is why we are limited to max_bufs-2
+ * descriptors for a single transmit, as the header and previous
+ * fragment are already consuming 2 descriptors.
+ */
+ sum = 1 - shinfo->gso_size;
+
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
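+
+ /* A worked run (sizes are illustrative): with max_bufs = 8 and
+ * gso_size = 9000, sum starts at -8999. If the first five frags
+ * total 5000 bytes, sum is -3999 entering the loop below; each
+ * iteration must add enough new frag data (net of the stale frag
+ * leaving the max_bufs-2 window) to keep sum non-negative, or the
+ * skb must be linearized.
+ */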
+
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+ for (stale = &shinfo->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
+ sum += skb_frag_size(frag++);
+
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > IDPF_TX_MAX_DESC_DATA) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (IDPF_TX_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ } while (stale_size > IDPF_TX_MAX_DESC_DATA);
+ }
+
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+
+ if (!nr_frags--)
+ break;
+
+ sum -= stale_size;
+ }
+
+ return false;
+}
+
+/**
+ * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
+ * @skb: send buffer
+ * @max_bufs: maximum scatter gather buffers for single packet
+ * @count: number of buffers this packet needs
+ *
+ * Make sure we don't exceed maximum scatter gather buffers for a single
+ * packet. We have to do some special checking around the boundary (max_bufs-1)
+ * if TSO is on since we need to count the TSO header and payload separately.
+ * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
+ * header, 1 for segment payload, and then 7 for the fragments.
+ */
+bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+ unsigned int count)
+{
+ if (likely(count < max_bufs))
+ return false;
+ if (skb_is_gso(skb))
+ return __idpf_chk_linearize(skb, max_bufs);
+
+ return count > max_bufs;
+}
+
+/**
+ * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
+ * @txq: queue to put context descriptor on
+ *
+ * Since the TX buffer ring mimics the descriptor ring, update the tx buffer
+ * ring entry to reflect that this index is a context descriptor
+ */
+static struct idpf_flex_tx_ctx_desc *
+idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
+{
+ struct idpf_flex_tx_ctx_desc *desc;
+ int i = txq->next_to_use;
+
+ memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
+ txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+
+ /* grab the next descriptor */
+ desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
+ txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
+
+ return desc;
+}
+
+/**
+ * idpf_tx_drop_skb - free the SKB and bump tail if necessary
+ * @tx_q: queue to send buffer on
+ * @skb: pointer to skb
+ */
+netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
+{
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tx.skb_drops);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
+ * @skb: send buffer
+ * @tx_q: queue to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
+ struct idpf_queue *tx_q)
+{
+ struct idpf_tx_splitq_params tx_params = { };
+ struct idpf_tx_buf *first;
+ unsigned int count;
+ int tso;
+
+ count = idpf_tx_desc_count_required(tx_q, skb);
+ if (unlikely(!count))
+ return idpf_tx_drop_skb(tx_q, skb);
+
+ tso = idpf_tso(skb, &tx_params.offload);
+ if (unlikely(tso < 0))
+ return idpf_tx_drop_skb(tx_q, skb);
+
+ /* Check for splitq specific TX resources */
+ count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
+ if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
+ idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ if (tso) {
+ /* If tso is needed, set up context desc */
+ struct idpf_flex_tx_ctx_desc *ctx_desc =
+ idpf_tx_splitq_get_ctx_desc(tx_q);
+
+ ctx_desc->tso.qw1.cmd_dtype =
+ cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+ IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
+ ctx_desc->tso.qw0.flex_tlen =
+ cpu_to_le32(tx_params.offload.tso_len &
+ IDPF_TXD_FLEX_CTX_TLEN_M);
+ ctx_desc->tso.qw0.mss_rt =
+ cpu_to_le16(tx_params.offload.mss &
+ IDPF_TXD_FLEX_CTX_MSS_RT_M);
+ ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
+
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tx.lso_pkts);
+ u64_stats_update_end(&tx_q->stats_sync);
+ }
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_q->tx_buf[tx_q->next_to_use];
+ first->skb = skb;
+
+ if (tso) {
+ first->gso_segs = tx_params.offload.tso_segs;
+ first->bytecount = skb->len +
+ ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
+ } else {
+ first->gso_segs = 1;
+ first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ }
+
+ if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) {
+ tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
+ tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
+ /* Set the RE bit to catch any packets that may have not been
+ * stashed during RS completion cleaning. MIN_GAP is set to
+ * MIN_RING size to ensure it will be set at least once each
+ * time around the ring.
+ */
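+ /* Illustrative cadence (the gap value is an example): if
+ * IDPF_TX_SPLITQ_RE_MIN_GAP were 64, the RE bit would be requested
+ * whenever next_to_use is a multiple of 64, guaranteeing at least
+ * one RE completion per trip around any ring of 64 or more entries.
+ */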
+ if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
+ tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
+ tx_q->txq_grp->num_completions_pending++;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
+
+ } else {
+ tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
+ tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
+ }
+
+ idpf_tx_splitq_map(tx_q, &tx_params, first);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * idpf_tx_splitq_start - Selects the right Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+ struct idpf_queue *tx_q;
+
+ if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+ }
+
+ tx_q = vport->txqs[skb_get_queue_mapping(skb)];
+
+ /* hardware can't handle really short frames, hardware padding works
+ * beyond this point
+ */
+ if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
+ idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+
+ return NETDEV_TX_OK;
+ }
+
+ return idpf_tx_splitq_frame(skb, tx_q);
+}
+
+/**
+ * idpf_ptype_to_htype - get a hash type
+ * @decoded: Decoded Rx packet type related fields
+ *
+ * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
+ * skb_set_hash, based on the PTYPE parsed by the HW Rx pipeline, which is
+ * carried in the Rx desc.
+ */
+enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded)
+{
+ if (!decoded->known)
+ return PKT_HASH_TYPE_NONE;
+ if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
+ decoded->inner_prot)
+ return PKT_HASH_TYPE_L4;
+ if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
+ decoded->outer_ip)
+ return PKT_HASH_TYPE_L3;
+ if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2)
+ return PKT_HASH_TYPE_L2;
+
+ return PKT_HASH_TYPE_NONE;
+}
+
+/**
+ * idpf_rx_hash - set the hash value in the skb
+ * @rxq: Rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being populated
+ * @rx_desc: Receive descriptor
+ * @decoded: Decoded Rx packet type related fields
+ */
+static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
+ struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct idpf_rx_ptype_decoded *decoded)
+{
+ u32 hash;
+
+ if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH)))
+ return;
+
+ hash = le16_to_cpu(rx_desc->hash1) |
+ (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
+ (rx_desc->hash3 << 24);
+
+ skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+}
+
+/**
+ * idpf_rx_csum - Indicate in skb if checksum is good
+ * @rxq: Rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being populated
+ * @csum_bits: checksum fields extracted from the descriptor
+ * @decoded: Decoded Rx packet type related fields
+ *
+ * skb->protocol must be set before this function is called
+ */
+static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb,
+ struct idpf_rx_csum_decoded *csum_bits,
+ struct idpf_rx_ptype_decoded *decoded)
+{
+ bool ipv4, ipv6;
+
+ /* check if Rx checksum is enabled */
+ if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM)))
+ return;
+
+ /* check if HW has decoded the packet and checksum */
+ if (!(csum_bits->l3l4p))
+ return;
+
+ ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
+ ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+
+ if (ipv4 && (csum_bits->ipe || csum_bits->eipe))
+ goto checksum_fail;
+
+ if (ipv6 && csum_bits->ipv6exadd)
+ return;
+
+ /* check for L4 errors and handle packets that were not able to be
+ * checksummed
+ */
+ if (csum_bits->l4e)
+ goto checksum_fail;
+
+ /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
+ switch (decoded->inner_prot) {
+ case IDPF_RX_PTYPE_INNER_PROT_ICMP:
+ case IDPF_RX_PTYPE_INNER_PROT_TCP:
+ case IDPF_RX_PTYPE_INNER_PROT_UDP:
+ if (!csum_bits->raw_csum_inv) {
+ u16 csum = csum_bits->raw_csum;
+
+ skb->csum = csum_unfold((__force __sum16)~swab16(csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ } else {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ break;
+ case IDPF_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ break;
+ }
+
+ return;
+
+checksum_fail:
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_update_end(&rxq->stats_sync);
+}
+
+/**
+ * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
+ * @rx_desc: receive descriptor
+ * @csum: structure to extract checksum fields
+ */
+static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct idpf_rx_csum_decoded *csum)
+{
+ u8 qword0, qword1;
+
+ qword0 = rx_desc->status_err0_qw0;
+ qword1 = rx_desc->status_err0_qw1;
+
+ csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
+ qword1);
+ csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
+ qword1);
+ csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
+ qword1);
+ csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
+ qword1);
+ csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
+ qword0);
+ csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M,
+ le16_to_cpu(rx_desc->ptype_err_fflags0));
+ csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
+}
+
+/**
+ * idpf_rx_rsc - Set the RSC fields in the skb
+ * @rxq: Rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being populated
+ * @rx_desc: Receive descriptor
+ * @decoded: Decoded Rx packet type related fields
+ *
+ * Return 0 on success and error code on failure
+ *
+ * Populate the skb fields with the total number of RSC segments, RSC payload
+ * length and packet type.
+ */
+static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
+ struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct idpf_rx_ptype_decoded *decoded)
+{
+ u16 rsc_segments, rsc_seg_len;
+ bool ipv4, ipv6;
+ int len;
+
+ if (unlikely(!decoded->outer_ip))
+ return -EINVAL;
+
+ rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
+ if (unlikely(!rsc_seg_len))
+ return -EINVAL;
+
+ ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
+ ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+
+ if (unlikely(!(ipv4 ^ ipv6)))
+ return -EINVAL;
+
+ rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
+ if (unlikely(rsc_segments == 1))
+ return 0;
+
+ NAPI_GRO_CB(skb)->count = rsc_segments;
+ skb_shinfo(skb)->gso_size = rsc_seg_len;
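+
+ /* Illustrative sizing (numbers are examples): a coalesced skb with
+ * data_len = 26000 and rscseglen = 4000 is presented to GRO as
+ * DIV_ROUND_UP(26000, 4000) = 7 segments of gso_size 4000, much as
+ * if HW had aggregated seven MSS-sized TCP segments.
+ */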
+
+ skb_reset_network_header(skb);
+ len = skb->len - skb_transport_offset(skb);
+
+ if (ipv4) {
+ struct iphdr *ipv4h = ip_hdr(skb);
+
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ /* Reset and set transport header offset in skb */
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+
+ /* Compute the TCP pseudo-header checksum */
+ tcp_hdr(skb)->check =
+ ~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
+ } else {
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ tcp_hdr(skb)->check =
+ ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
+ }
+
+ tcp_gro_complete(skb);
+
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.rsc_pkts);
+ u64_stats_update_end(&rxq->stats_sync);
+
+ return 0;
+}
+
+/**
+ * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rxq: Rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being populated
+ * @rx_desc: Receive descriptor
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, protocol, and
+ * other fields within the skb.
+ */
+static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
+ struct sk_buff *skb,
+ struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+{
+ struct idpf_rx_csum_decoded csum_bits = { };
+ struct idpf_rx_ptype_decoded decoded;
+ u16 rx_ptype;
+
+ rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M,
+ le16_to_cpu(rx_desc->ptype_err_fflags0));
+
+ decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
+ /* If we don't know the ptype we can't do anything else with it. Just
+ * pass it up the stack as-is.
+ */
+ if (!decoded.known)
+ return 0;
+
+ /* process RSS/hash */
+ idpf_rx_hash(rxq, skb, rx_desc, &decoded);
+
+ skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
+
+ if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M,
+ le16_to_cpu(rx_desc->hdrlen_flags)))
+ return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
+
+ idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
+ idpf_rx_csum(rxq, skb, &csum_bits, &decoded);
+
+ return 0;
+}
+
+/**
+ * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
+ * @rx_buf: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
+ *
+ * This function will add the data contained in rx_buf->page to the skb.
+ * It will just attach the page as a frag to the skb.
+ * The function will then update the page offset.
+ */
+void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
+ unsigned int size)
+{
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
+ rx_buf->page_offset, size, rx_buf->truesize);
+
+ rx_buf->page = NULL;
+}
+
+/**
+ * idpf_rx_construct_skb - Allocate skb and populate it
+ * @rxq: Rx descriptor queue
+ * @rx_buf: Rx buffer to pull data from
+ * @size: the length of the packet
+ *
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
+ struct idpf_rx_buf *rx_buf,
+ unsigned int size)
+{
+ unsigned int headlen;
+ struct sk_buff *skb;
+ void *va;
+
+ va = page_address(rx_buf->page) + rx_buf->page_offset;
+
+ /* prefetch first cache line of first page */
+ net_prefetch(va);
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE,
+ GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ idpf_rx_put_page(rx_buf);
+
+ return NULL;
+ }
+
+ skb_record_rx_queue(skb, rxq->idx);
+ skb_mark_for_recycle(skb);
+
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > IDPF_RX_HDR_SIZE)
+ headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+ /* if we exhaust the linear part then add what is left as a frag */
+ size -= headlen;
+ if (!size) {
+ idpf_rx_put_page(rx_buf);
+
+ return skb;
+ }
+
+ skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen,
+ size, rx_buf->truesize);
+
+ /* Since we're giving the page to the stack, clear our reference to it.
+ * We'll get a new one during buffer posting.
+ */
+ rx_buf->page = NULL;
+
+ return skb;
+}
+
+/**
+ * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer
+ * @rxq: Rx descriptor queue
+ * @va: Rx buffer to pull data from
+ * @size: the length of the packet
+ *
+ * This function allocates an skb. It then populates it with the page data from
+ * the current receive descriptor, taking care to set up the skb correctly.
+ * This specifically uses a header buffer to start building the skb.
+ */
+static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq,
+ const void *va,
+ unsigned int size)
+{
+ struct sk_buff *skb;
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_record_rx_queue(skb, rxq->idx);
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* More than likely, a payload fragment using a page from the
+ * page_pool will be added to the skb, so mark it for recycle
+ * preemptively. If not, it's inconsequential.
+ */
+ skb_mark_for_recycle(skb);
+
+ return skb;
+}
+
+/**
+ * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
+ * status and error fields
+ * @stat_err_field: field from descriptor to test bits in
+ * @stat_err_bits: value to mask
+ *
+ * Returns true if any of the given status/error bits are set in the field.
+ */
+static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
+ const u8 stat_err_bits)
+{
+ return !!(stat_err_field & stat_err_bits);
+}
+
+/**
+ * idpf_rx_splitq_is_eop - process handling of EOP buffers
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * If the buffer is an EOP buffer, this function returns true, otherwise it
+ * returns false, indicating that this is in fact a non-EOP buffer.
+ */
+static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+{
+ /* if we are the last buffer then there is nothing else to do */
+ return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
+ IDPF_RXD_EOF_SPLITQ));
+}
+
+/**
+ * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
+ * @rxq: Rx descriptor queue to retrieve receive buffer queue
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
+ */
+static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
+{
+ int total_rx_bytes = 0, total_rx_pkts = 0;
+ struct idpf_queue *rx_bufq = NULL;
+ struct sk_buff *skb = rxq->skb;
+ u16 ntc = rxq->next_to_clean;
+
+ /* Process Rx packets bounded by budget */
+ while (likely(total_rx_pkts < budget)) {
+ struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+ struct idpf_sw_queue *refillq = NULL;
+ struct idpf_rxq_set *rxq_set = NULL;
+ struct idpf_rx_buf *rx_buf = NULL;
+ union virtchnl2_rx_desc *desc;
+ unsigned int pkt_len = 0;
+ unsigned int hdr_len = 0;
+ u16 gen_id, buf_id = 0;
+ /* Header buffer overflow only valid for header split */
+ bool hbo = false;
+ int bufq_id;
+ u8 rxdid;
+
+ /* get the Rx desc from Rx queue based on 'next_to_clean' */
+ desc = IDPF_RX_DESC(rxq, ntc);
+ rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc
+ */
+ dma_rmb();
+
+ /* if the descriptor isn't done, no work yet to do */
+ gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
+ gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id);
+
+ if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
+ break;
+
+ rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
+ rx_desc->rxdid_ucast);
+ if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
+ IDPF_RX_BUMP_NTC(rxq, ntc);
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.bad_descs);
+ u64_stats_update_end(&rxq->stats_sync);
+ continue;
+ }
+
+ pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
+ pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
+ pkt_len);
+
+ hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
+ rx_desc->status_err0_qw1);
+
+ if (unlikely(hbo)) {
+ /* If a header buffer overflow occurs, i.e. the header is
+ * too large to fit in the header split buffer, HW will
+ * put the entire packet, including headers, in the
+ * data/payload buffer.
+ */
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf);
+ u64_stats_update_end(&rxq->stats_sync);
+ goto bypass_hsplit;
+ }
+
+ hdr_len = le16_to_cpu(rx_desc->hdrlen_flags);
+ hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M,
+ hdr_len);
+
+bypass_hsplit:
+ bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
+ bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M,
+ bufq_id);
+
+ rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
+ if (!bufq_id)
+ refillq = rxq_set->refillq0;
+ else
+ refillq = rxq_set->refillq1;
+
+ /* retrieve buffer from the rxq */
+ rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq;
+
+ buf_id = le16_to_cpu(rx_desc->buf_id);
+
+ rx_buf = &rx_bufq->rx_buf.buf[buf_id];
+
+ if (hdr_len) {
+ const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va +
+ (u32)buf_id * IDPF_HDR_BUF_SIZE;
+
+ skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len);
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts);
+ u64_stats_update_end(&rxq->stats_sync);
+ }
+
+ if (pkt_len) {
+ idpf_rx_sync_for_cpu(rx_buf, pkt_len);
+ if (skb)
+ idpf_rx_add_frag(rx_buf, skb, pkt_len);
+ else
+ skb = idpf_rx_construct_skb(rxq, rx_buf,
+ pkt_len);
+ } else {
+ idpf_rx_put_page(rx_buf);
+ }
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
+
+ idpf_rx_post_buf_refill(refillq, buf_id);
+
+ IDPF_RX_BUMP_NTC(rxq, ntc);
+ /* skip if it is a non-EOP desc */
+ if (!idpf_rx_splitq_is_eop(rx_desc))
+ continue;
+
+ /* pad skb if needed (to make valid ethernet frame) */
+ if (eth_skb_pad(skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+
+ /* protocol */
+ if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ continue;
+ }
+
+ /* send completed skb up the stack */
+ napi_gro_receive(&rxq->q_vector->napi, skb);
+ skb = NULL;
+
+ /* update budget accounting */
+ total_rx_pkts++;
+ }
+
+ rxq->next_to_clean = ntc;
+
+ rxq->skb = skb;
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts);
+ u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_update_end(&rxq->stats_sync);
+
+ /* guarantee a trip back through this routine if there was a failure */
+ return total_rx_pkts;
+}
+
+/**
+ * idpf_rx_update_bufq_desc - Update buffer queue descriptor
+ * @bufq: Pointer to the buffer queue
+ * @refill_desc: SW Refill queue descriptor containing buffer ID
+ * @buf_desc: Buffer queue descriptor
+ *
+ * Return 0 on success and negative on failure.
+ */
+static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
+ struct virtchnl2_splitq_rx_buf_desc *buf_desc)
+{
+ struct idpf_rx_buf *buf;
+ dma_addr_t addr;
+ u16 buf_id;
+
+ buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
+
+ buf = &bufq->rx_buf.buf[buf_id];
+
+ addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
+ if (unlikely(addr == DMA_MAPPING_ERROR))
+ return -ENOMEM;
+
+ buf_desc->pkt_addr = cpu_to_le64(addr);
+ buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
+
+ if (!bufq->rx_hsplit_en)
+ return 0;
+
+ buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
+ (u32)buf_id * IDPF_HDR_BUF_SIZE);
+
+ return 0;
+}
+
+/**
+ * idpf_rx_clean_refillq - Clean refill queue buffers
+ * @bufq: buffer queue to post buffers back to
+ * @refillq: refill queue to clean
+ *
+ * This function takes care of the buffer refill management
+ */
+static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
+ struct idpf_sw_queue *refillq)
+{
+ struct virtchnl2_splitq_rx_buf_desc *buf_desc;
+ u16 bufq_nta = bufq->next_to_alloc;
+ u16 ntc = refillq->next_to_clean;
+ int cleaned = 0;
+ u16 gen;
+
+ buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
+
+ /* make sure we stop at ring wrap in the unlikely case ring is full */
+ while (likely(cleaned < refillq->desc_count)) {
+ u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
+ bool failure;
+
+ gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
+ if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen)
+ break;
+
+ failure = idpf_rx_update_bufq_desc(bufq, refill_desc,
+ buf_desc);
+ if (failure)
+ break;
+
+ if (unlikely(++ntc == refillq->desc_count)) {
+ change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ ntc = 0;
+ }
+
+ if (unlikely(++bufq_nta == bufq->desc_count)) {
+ buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
+ bufq_nta = 0;
+ } else {
+ buf_desc++;
+ }
+
+ cleaned++;
+ }
+
+ if (!cleaned)
+ return;
+
+ /* We want to limit how many transactions on the bus we trigger with
+ * tail writes so we only do it in strides. It's also important we
+ * align the write to a multiple of 8 as required by HW. The expression
+ * below counts the filled descriptors that have not yet been posted to
+ * HW, accounting for ring wrap.
+ */
+ if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
+ bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
+ idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
+ IDPF_RX_BUF_POST_STRIDE));
+
+ /* update next to alloc since we have filled the ring */
+ refillq->next_to_clean = ntc;
+ bufq->next_to_alloc = bufq_nta;
+}
+
+/**
+ * idpf_rx_clean_refillq_all - Clean all refill queues
+ * @bufq: buffer queue with refill queues
+ *
+ * Iterates through all refill queues assigned to the buffer queue assigned to
+ * this vector.
+ */
+static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq)
+{
+ struct idpf_bufq_set *bufq_set;
+ int i;
+
+ bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
+ for (i = 0; i < bufq_set->num_refillqs; i++)
+ idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
+}
+
+/**
+ * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
+ void *data)
+{
+ struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
+
+ q_vector->total_events++;
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
+ * @vport: virtual port structure
+ */
+static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
+{
+ u16 v_idx;
+
+ for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
+ netif_napi_del(&vport->q_vectors[v_idx].napi);
+}
+
+/**
+ * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
+ * @vport: main vport structure
+ */
+static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
+{
+ int v_idx;
+
+ for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
+ napi_disable(&vport->q_vectors[v_idx].napi);
+}
+
+/**
+ * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
+ * @vport: virtual port
+ *
+ * Free the memory allocated for interrupt vectors associated to a vport
+ */
+void idpf_vport_intr_rel(struct idpf_vport *vport)
+{
+ int i, j, v_idx;
+
+ for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+
+ kfree(q_vector->bufq);
+ q_vector->bufq = NULL;
+ kfree(q_vector->tx);
+ q_vector->tx = NULL;
+ kfree(q_vector->rx);
+ q_vector->rx = NULL;
+ }
+
+ /* Clean up the mapping of queues to vectors */
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++)
+ rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL;
+ else
+ for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
+ rx_qgrp->singleq.rxqs[j]->q_vector = NULL;
+ }
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ for (i = 0; i < vport->num_txq_grp; i++)
+ vport->txq_grps[i].complq->q_vector = NULL;
+ else
+ for (i = 0; i < vport->num_txq_grp; i++)
+ for (j = 0; j < vport->txq_grps[i].num_txq; j++)
+ vport->txq_grps[i].txqs[j]->q_vector = NULL;
+
+ kfree(vport->q_vectors);
+ vport->q_vectors = NULL;
+}
+
+/**
+ * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
+ * @vport: main vport structure
+ */
+static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ int vector;
+
+ for (vector = 0; vector < vport->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ int irq_num, vidx;
+
+ /* free only the irqs that were actually requested */
+ if (!q_vector)
+ continue;
+
+ vidx = vport->q_vector_idxs[vector];
+ irq_num = adapter->msix_entries[vidx].vector;
+
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, q_vector);
+ }
+}
+
+/**
+ * idpf_vport_intr_dis_irq_all - Disable all interrupts
+ * @vport: main vport structure
+ */
+static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
+{
+ struct idpf_q_vector *q_vector = vport->q_vectors;
+ int q_idx;
+
+ for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
+ writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
+}
+
+/**
+ * idpf_vport_intr_buildreg_itr - Build a value for the dynamic control register
+ * @q_vector: pointer to q_vector
+ * @type: ITR index
+ * @itr: ITR value
+ *
+ * Returns the register value to write to re-enable the interrupt.
+ */
+static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
+ const int type, u16 itr)
+{
+ u32 itr_val;
+
+ itr &= IDPF_ITR_MASK;
+ /* Don't clear PBA because that can cause lost interrupts that
+ * came in while we were cleaning/polling
+ */
+ itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
+ (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
+ (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
+
+ return itr_val;
+}
+
+/**
+ * idpf_update_dim_sample - Update dim sample with packets and bytes
+ * @q_vector: the vector associated with the interrupt
+ * @dim_sample: dim sample to update
+ * @dim: dim instance structure
+ * @packets: total packets
+ * @bytes: total bytes
+ *
+ * Update the dim sample with the packets and bytes which are passed to this
+ * function. Set the dim state appropriately if the dim settings get stale.
+ */
+static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
+ struct dim_sample *dim_sample,
+ struct dim *dim, u64 packets, u64 bytes)
+{
+ dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
+ dim_sample->comp_ctr = 0;
+
+ /* if dim settings get stale, like when not updated for 1 second or
+ * longer, force it to start again. This addresses the frequent case
+ * of an idle queue being switched to by the scheduler.
+ */
+ if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ)
+ dim->state = DIM_START_MEASURE;
+}
+
+/**
+ * idpf_net_dim - Update net DIM algorithm
+ * @q_vector: the vector associated with the interrupt
+ *
+ * Create a DIM sample and notify net_dim() so that it can possibly decide
+ * a new ITR value based on incoming packets, bytes, and interrupts.
+ *
+ * This function is a no-op if the queue is not configured to dynamic ITR.
+ */
+static void idpf_net_dim(struct idpf_q_vector *q_vector)
+{
+ struct dim_sample dim_sample = { };
+ u64 packets, bytes;
+ u32 i;
+
+ if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
+ goto check_rx_itr;
+
+ for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
+ struct idpf_queue *txq = q_vector->tx[i];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&txq->stats_sync);
+ packets += u64_stats_read(&txq->q_stats.tx.packets);
+ bytes += u64_stats_read(&txq->q_stats.tx.bytes);
+ } while (u64_stats_fetch_retry(&txq->stats_sync, start));
+ }
+
+ idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
+ packets, bytes);
+ net_dim(&q_vector->tx_dim, dim_sample);
+
+check_rx_itr:
+ if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
+ return;
+
+ for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
+ struct idpf_queue *rxq = q_vector->rx[i];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&rxq->stats_sync);
+ packets += u64_stats_read(&rxq->q_stats.rx.packets);
+ bytes += u64_stats_read(&rxq->q_stats.rx.bytes);
+ } while (u64_stats_fetch_retry(&rxq->stats_sync, start));
+ }
+
+ idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
+ packets, bytes);
+ net_dim(&q_vector->rx_dim, dim_sample);
+}
+
+/**
+ * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ * Update the net_dim() algorithm and re-enable the interrupt associated with
+ * this vector.
+ */
+void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
+{
+ u32 intval;
+
+ /* net_dim() updates ITR out-of-band using a work item */
+ idpf_net_dim(q_vector);
+
+ intval = idpf_vport_intr_buildreg_itr(q_vector,
+ IDPF_NO_ITR_UPDATE_IDX, 0);
+
+ writel(intval, q_vector->intr_reg.dyn_ctl);
+}
+
+/**
+ * idpf_vport_intr_req_irq - Request MSI-X IRQs from the OS for the vport
+ * @vport: main vport structure
+ * @basename: name for the vector
+ */
+static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ int vector, err, irq_num, vidx;
+ const char *vec_name;
+
+ for (vector = 0; vector < vport->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+
+ vidx = vport->q_vector_idxs[vector];
+ irq_num = adapter->msix_entries[vidx].vector;
+
+ if (q_vector->num_rxq && q_vector->num_txq)
+ vec_name = "TxRx";
+ else if (q_vector->num_rxq)
+ vec_name = "Rx";
+ else if (q_vector->num_txq)
+ vec_name = "Tx";
+ else
+ continue;
+
+ q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
+ basename, vec_name, vidx);
+
+ err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ netdev_err(vport->netdev,
+ "Request_irq failed, error: %d\n", err);
+ goto free_q_irqs;
+ }
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ }
+
+ return 0;
+
+free_q_irqs:
+ while (--vector >= 0) {
+ vidx = vport->q_vector_idxs[vector];
+ irq_num = adapter->msix_entries[vidx].vector;
+ free_irq(irq_num, &vport->q_vectors[vector]);
+ }
+
+ return err;
+}
+
+/**
+ * idpf_vport_intr_write_itr - Write ITR value to the ITR register
+ * @q_vector: q_vector structure
+ * @itr: Interrupt throttling rate
+ * @tx: Tx or Rx ITR
+ */
+void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
+{
+ struct idpf_intr_reg *intr_reg;
+
+ if (tx && !q_vector->tx)
+ return;
+
+ if (!tx && !q_vector->rx)
+ return;
+
+ intr_reg = &q_vector->intr_reg;
+ writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
+ tx ? intr_reg->tx_itr : intr_reg->rx_itr);
+}
+
+/**
+ * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
+ * @vport: main vport structure
+ */
+static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
+{
+ bool dynamic;
+ int q_idx;
+ u16 itr;
+
+ for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
+
+ /* Set the initial ITR values */
+ if (qv->num_txq) {
+ dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
+ itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
+ idpf_vport_intr_write_itr(qv, dynamic ?
+ itr : qv->tx_itr_value,
+ true);
+ }
+
+ if (qv->num_rxq) {
+ dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
+ itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
+ idpf_vport_intr_write_itr(qv, dynamic ?
+ itr : qv->rx_itr_value,
+ false);
+ }
+
+ if (qv->num_txq || qv->num_rxq)
+ idpf_vport_intr_update_itr_ena_irq(qv);
+ }
+}
+
+/**
+ * idpf_vport_intr_deinit - Release all vector associations for the vport
+ * @vport: main vport structure
+ */
+void idpf_vport_intr_deinit(struct idpf_vport *vport)
+{
+ idpf_vport_intr_napi_dis_all(vport);
+ idpf_vport_intr_napi_del_all(vport);
+ idpf_vport_intr_dis_irq_all(vport);
+ idpf_vport_intr_rel_irq(vport);
+}
+
+/**
+ * idpf_tx_dim_work - Work callback to apply the TX ITR chosen by net_dim
+ * @work: work queue structure
+ */
+static void idpf_tx_dim_work(struct work_struct *work)
+{
+ struct idpf_q_vector *q_vector;
+ struct idpf_vport *vport;
+ struct dim *dim;
+ u16 itr;
+
+ dim = container_of(work, struct dim, work);
+ q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
+ vport = q_vector->vport;
+
+ if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
+ dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
+
+ /* look up the values in our local table */
+ itr = vport->tx_itr_profile[dim->profile_ix];
+
+ idpf_vport_intr_write_itr(q_vector, itr, true);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+/**
+ * idpf_rx_dim_work - Work callback to apply the RX ITR chosen by net_dim
+ * @work: work queue structure
+ */
+static void idpf_rx_dim_work(struct work_struct *work)
+{
+ struct idpf_q_vector *q_vector;
+ struct idpf_vport *vport;
+ struct dim *dim;
+ u16 itr;
+
+ dim = container_of(work, struct dim, work);
+ q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
+ vport = q_vector->vport;
+
+ if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
+ dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
+
+ /* look up the values in our local table */
+ itr = vport->rx_itr_profile[dim->profile_ix];
+
+ idpf_vport_intr_write_itr(q_vector, itr, false);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+/**
+ * idpf_init_dim - Set up dynamic interrupt moderation
+ * @qv: q_vector structure
+ */
+static void idpf_init_dim(struct idpf_q_vector *qv)
+{
+ INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
+ qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
+
+ INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
+ qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
+}
+
+/**
+ * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
+ * @vport: main vport structure
+ */
+static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
+{
+ int q_idx;
+
+ for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
+
+ idpf_init_dim(q_vector);
+ napi_enable(&q_vector->napi);
+ }
+}
+
+/**
+ * idpf_tx_splitq_clean_all - Clean completion queues
+ * @q_vec: queue vector
+ * @budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns false if clean is not complete else returns true
+ */
+static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
+ int budget, int *cleaned)
+{
+ u16 num_txq = q_vec->num_txq;
+ bool clean_complete = true;
+ int i, budget_per_q;
+
+ if (unlikely(!num_txq))
+ return true;
+
+ budget_per_q = DIV_ROUND_UP(budget, num_txq);
+ for (i = 0; i < num_txq; i++)
+ clean_complete &= idpf_tx_clean_complq(q_vec->tx[i],
+ budget_per_q, cleaned);
+
+ return clean_complete;
+}
+
+/**
+ * idpf_rx_splitq_clean_all - Clean all RX queues and their refill queues
+ * @q_vec: queue vector
+ * @budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns false if clean is not complete else returns true
+ */
+static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
+ int *cleaned)
+{
+ u16 num_rxq = q_vec->num_rxq;
+ bool clean_complete = true;
+ int pkts_cleaned = 0;
+ int i, budget_per_q;
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ */
+ budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
+ for (i = 0; i < num_rxq; i++) {
+ struct idpf_queue *rxq = q_vec->rx[i];
+ int pkts_cleaned_per_q;
+
+ pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
+ /* if we clean as many as budgeted, we must not be done */
+ if (pkts_cleaned_per_q >= budget_per_q)
+ clean_complete = false;
+ pkts_cleaned += pkts_cleaned_per_q;
+ }
+ *cleaned = pkts_cleaned;
+
+ for (i = 0; i < q_vec->num_bufq; i++)
+ idpf_rx_clean_refillq_all(q_vec->bufq[i]);
+
+ return clean_complete;
+}
+
+/**
+ * idpf_vport_splitq_napi_poll - NAPI handler
+ * @napi: struct from which you get q_vector
+ * @budget: budget provided by stack
+ */
+static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct idpf_q_vector *q_vector =
+ container_of(napi, struct idpf_q_vector, napi);
+ bool clean_complete;
+ int work_done = 0;
+
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (unlikely(!budget)) {
+ idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
+
+ return 0;
+ }
+
+ clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
+ clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ work_done = min_t(int, work_done, budget - 1);
+
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ idpf_vport_intr_update_itr_ena_irq(q_vector);
+
+ /* Switch to poll mode in the tear-down path after sending disable
+ * queues virtchnl message, as the interrupts will be disabled after
+ * that
+ */
+ if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
+ q_vector->tx[0]->flags)))
+ return budget;
+
+ return work_done;
+}
+
+/**
+ * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
+ * @vport: virtual port
+ *
+ * Map interrupt vectors to the TX, RX and buffer queues they service.
+ */
+static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
+{
+ u16 num_txq_grp = vport->num_txq_grp;
+ int i, j, qv_idx, bufq_vidx = 0;
+ struct idpf_rxq_group *rx_qgrp;
+ struct idpf_txq_group *tx_qgrp;
+ struct idpf_queue *q, *bufq;
+ u16 q_index;
+
+ for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
+ u16 num_rxq;
+
+ rx_qgrp = &vport->rxq_grps[i];
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+ q->q_vector = &vport->q_vectors[qv_idx];
+ q_index = q->q_vector->num_rxq;
+ q->q_vector->rx[q_index] = q;
+ q->q_vector->num_rxq++;
+ qv_idx++;
+ }
+
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ bufq->q_vector = &vport->q_vectors[bufq_vidx];
+ q_index = bufq->q_vector->num_bufq;
+ bufq->q_vector->bufq[q_index] = bufq;
+ bufq->q_vector->num_bufq++;
+ }
+ if (++bufq_vidx >= vport->num_q_vectors)
+ bufq_vidx = 0;
+ }
+ }
+
+ for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
+ u16 num_txq;
+
+ tx_qgrp = &vport->txq_grps[i];
+ num_txq = tx_qgrp->num_txq;
+
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
+ q = tx_qgrp->complq;
+ q->q_vector = &vport->q_vectors[qv_idx];
+ q_index = q->q_vector->num_txq;
+ q->q_vector->tx[q_index] = q;
+ q->q_vector->num_txq++;
+ qv_idx++;
+ } else {
+ for (j = 0; j < num_txq; j++) {
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
+ q = tx_qgrp->txqs[j];
+ q->q_vector = &vport->q_vectors[qv_idx];
+ q_index = q->q_vector->num_txq;
+ q->q_vector->tx[q_index] = q;
+ q->q_vector->num_txq++;
+
+ qv_idx++;
+ }
+ }
+ }
+}
+
+/**
+ * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
+ * @vport: virtual port
+ *
+ * Initialize vector indexes with values returned over mailbox
+ */
+static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_alloc_vectors *ac;
+ u16 *vecids, total_vecs;
+ int i;
+
+ ac = adapter->req_vec_chunks;
+ if (!ac) {
+ for (i = 0; i < vport->num_q_vectors; i++)
+ vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
+
+ return 0;
+ }
+
+ total_vecs = idpf_get_reserved_vecs(adapter);
+ vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
+ if (!vecids)
+ return -ENOMEM;
+
+ idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
+
+ for (i = 0; i < vport->num_q_vectors; i++)
+ vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
+
+ kfree(vecids);
+
+ return 0;
+}
+
+/**
+ * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
+ * @vport: virtual port structure
+ */
+static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
+{
+ int (*napi_poll)(struct napi_struct *napi, int budget);
+ u16 v_idx;
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ napi_poll = idpf_vport_splitq_napi_poll;
+ else
+ napi_poll = idpf_vport_singleq_napi_poll;
+
+ for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+
+ netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
+
+ /* only set affinity_mask if the CPU is online */
+ if (cpu_online(v_idx))
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ }
+}
+
+/**
+ * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
+ * @vport: virtual port
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+int idpf_vport_intr_alloc(struct idpf_vport *vport)
+{
+ u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
+ struct idpf_q_vector *q_vector;
+ int v_idx, err;
+
+ vport->q_vectors = kcalloc(vport->num_q_vectors,
+ sizeof(struct idpf_q_vector), GFP_KERNEL);
+ if (!vport->q_vectors)
+ return -ENOMEM;
+
+ txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors);
+ rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors);
+ bufqs_per_vector = vport->num_bufqs_per_qgrp *
+ DIV_ROUND_UP(vport->num_rxq_grp,
+ vport->num_q_vectors);
+
+ for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
+ q_vector = &vport->q_vectors[v_idx];
+ q_vector->vport = vport;
+
+ q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
+ q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
+
+ q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
+ q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
+
+ q_vector->tx = kcalloc(txqs_per_vector,
+ sizeof(struct idpf_queue *),
+ GFP_KERNEL);
+ if (!q_vector->tx) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ q_vector->rx = kcalloc(rxqs_per_vector,
+ sizeof(struct idpf_queue *),
+ GFP_KERNEL);
+ if (!q_vector->rx) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ continue;
+
+ q_vector->bufq = kcalloc(bufqs_per_vector,
+ sizeof(struct idpf_queue *),
+ GFP_KERNEL);
+ if (!q_vector->bufq) {
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ idpf_vport_intr_rel(vport);
+
+ return err;
+}
+
+/**
+ * idpf_vport_intr_init - Setup all vectors for the given vport
+ * @vport: virtual port
+ *
+ * Returns 0 on success or negative on failure
+ */
+int idpf_vport_intr_init(struct idpf_vport *vport)
+{
+ char *int_name;
+ int err;
+
+ err = idpf_vport_intr_init_vec_idx(vport);
+ if (err)
+ return err;
+
+ idpf_vport_intr_map_vector_to_qs(vport);
+ idpf_vport_intr_napi_add_all(vport);
+ idpf_vport_intr_napi_ena_all(vport);
+
+ err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
+ if (err)
+ goto unroll_vectors_alloc;
+
+ int_name = kasprintf(GFP_KERNEL, "%s-%s",
+ dev_driver_string(&vport->adapter->pdev->dev),
+ vport->netdev->name);
+
+ err = idpf_vport_intr_req_irq(vport, int_name);
+ if (err)
+ goto unroll_vectors_alloc;
+
+ idpf_vport_intr_ena_irq_all(vport);
+
+ return 0;
+
+unroll_vectors_alloc:
+ idpf_vport_intr_napi_dis_all(vport);
+ idpf_vport_intr_napi_del_all(vport);
+
+ return err;
+}
+
+/**
+ * idpf_config_rss - Send virtchnl messages to configure RSS
+ * @vport: virtual port
+ *
+ * Return 0 on success, negative on failure
+ */
+int idpf_config_rss(struct idpf_vport *vport)
+{
+ int err;
+
+ err = idpf_send_get_set_rss_key_msg(vport, false);
+ if (err)
+ return err;
+
+ return idpf_send_get_set_rss_lut_msg(vport, false);
+}
+
+/**
+ * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
+ * @vport: virtual port structure
+ */
+static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ u16 num_active_rxq = vport->num_rxq;
+ struct idpf_rss_data *rss_data;
+ int i;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+
+ for (i = 0; i < rss_data->rss_lut_size; i++) {
+ rss_data->rss_lut[i] = i % num_active_rxq;
+ rss_data->cached_lut[i] = rss_data->rss_lut[i];
+ }
+}
+
+/**
+ * idpf_init_rss - Allocate and initialize RSS resources
+ * @vport: virtual port
+ *
+ * Return 0 on success, negative on failure
+ */
+int idpf_init_rss(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_rss_data *rss_data;
+ u32 lut_size;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+
+ lut_size = rss_data->rss_lut_size * sizeof(u32);
+ rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
+ if (!rss_data->rss_lut)
+ return -ENOMEM;
+
+ rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
+ if (!rss_data->cached_lut) {
+ kfree(rss_data->rss_lut);
+ rss_data->rss_lut = NULL;
+
+ return -ENOMEM;
+ }
+
+ /* Fill the default RSS lut values */
+ idpf_fill_dflt_rss_lut(vport);
+
+ return idpf_config_rss(vport);
+}
+
+/**
+ * idpf_deinit_rss - Release RSS resources
+ * @vport: virtual port
+ */
+void idpf_deinit_rss(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_rss_data *rss_data;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ kfree(rss_data->cached_lut);
+ rss_data->cached_lut = NULL;
+ kfree(rss_data->rss_lut);
+ rss_data->rss_lut = NULL;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
new file mode 100644
index 000000000000..df76493faa75
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -0,0 +1,1023 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_TXRX_H_
+#define _IDPF_TXRX_H_
+
+#include <net/page_pool/helpers.h>
+#include <net/tcp.h>
+#include <net/netdev_queues.h>
+
+#define IDPF_LARGE_MAX_Q 256
+#define IDPF_MAX_Q 16
+#define IDPF_MIN_Q 2
+/* Mailbox Queue */
+#define IDPF_MAX_MBXQ 1
+
+#define IDPF_MIN_TXQ_DESC 64
+#define IDPF_MIN_RXQ_DESC 64
+#define IDPF_MIN_TXQ_COMPLQ_DESC 256
+#define IDPF_MAX_QIDS 256
+
+/* Number of descriptors in a queue should be a multiple of 32. RX queue
+ * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
+ * to achieve BufQ descriptors aligned to 32
+ */
+#define IDPF_REQ_DESC_MULTIPLE 32
+#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
+#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
+#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)
+
+#define IDPF_MAX_DESCS 8160
+#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
+#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
+#define MIN_SUPPORT_TXDID (\
+ VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
+ VIRTCHNL2_TXDID_FLEX_TSO_CTX)
+
+#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS 1
+#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS 1
+#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP 4
+#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP 4
+
+#define IDPF_COMPLQ_PER_GROUP 1
+#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP 1
+#define IDPF_MAX_BUFQS_PER_RXQ_GRP 2
+#define IDPF_BUFQ2_ENA 1
+#define IDPF_NUMQ_PER_CHUNK 1
+
+#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP 1
+#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP 1
+
+/* Default vector sharing */
+#define IDPF_MBX_Q_VEC 1
+#define IDPF_MIN_Q_VEC 1
+
+#define IDPF_DFLT_TX_Q_DESC_COUNT 512
+#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
+#define IDPF_DFLT_RX_Q_DESC_COUNT 512
+
+/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
+ * given RX completion queue has descriptors. This includes _ALL_ buffer
+ * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
+ * you have a total of 1024 buffers so your RX queue _must_ have at least that
+ * many descriptors. This macro divides a given number of RX descriptors by
+ * number of buffer queues to calculate how many descriptors each buffer queue
+ * can have without overrunning the RX queue.
+ *
+ * If you give hardware more buffers than completion descriptors what will
+ * happen is that if hardware gets a chance to post more than ring wrap of
+ * descriptors before SW gets an interrupt and overwrites SW head, the gen bit
+ * in the descriptor will be wrong. Any overwritten descriptors' buffers will
+ * be gone forever and SW has no reasonable way to tell that this has happened.
+ * From SW perspective, when we finally get an interrupt, it looks like we're
+ * still waiting for descriptor to be done, stalling forever.
+ */
+#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ) ((RXD) / (NUM_BUFQ))
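+
+/* Worked example: an RX queue of 1024 descriptors fed by two buffer
+ * queues gives IDPF_RX_BUFQ_DESC_COUNT(1024, 2) == 512, i.e. each buffer
+ * queue may own at most 512 buffers without risking an RX queue overrun.
+ */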
+
+#define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)
+
+#define IDPF_RX_BUMP_NTC(rxq, ntc) \
+do { \
+ if (unlikely(++(ntc) == (rxq)->desc_count)) { \
+ ntc = 0; \
+ change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \
+ } \
+} while (0)
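+
+/* Minimal usage sketch, mirroring the splitq RX clean loop:
+ *
+ * u16 ntc = rxq->next_to_clean;
+ *
+ * IDPF_RX_BUMP_NTC(rxq, ntc);
+ * rxq->next_to_clean = ntc;
+ */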
+
+#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx) \
+do { \
+ if (unlikely(++(idx) == (q)->desc_count)) \
+ idx = 0; \
+} while (0)
+
+#define IDPF_RX_HDR_SIZE 256
+#define IDPF_RX_BUF_2048 2048
+#define IDPF_RX_BUF_4096 4096
+#define IDPF_RX_BUF_STRIDE 32
+#define IDPF_RX_BUF_POST_STRIDE 16
+#define IDPF_LOW_WATERMARK 64
+/* Size of header buffer specifically for header split */
+#define IDPF_HDR_BUF_SIZE 256
+#define IDPF_PACKET_HDR_PAD \
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
+#define IDPF_TX_TSO_MIN_MSS 88
+
+/* Minimum number of descriptors between 2 descriptors with the RE bit set;
+ * only relevant in flow scheduling mode
+ */
+#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
+
+#define IDPF_RX_BI_BUFID_S 0
+#define IDPF_RX_BI_BUFID_M GENMASK(14, 0)
+#define IDPF_RX_BI_GEN_S 15
+#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S)
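+
+/* A refill queue "descriptor" is a plain u16: bits 14:0 carry the buffer
+ * id, bit 15 carries the SW generation bit. Sketch of composing one (gen
+ * and buf_id are hypothetical locals):
+ *
+ * u16 refill_desc = FIELD_PREP(IDPF_RX_BI_GEN_M, gen) |
+ * FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id);
+ */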
+#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
+#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
+
+#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \
+ (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
+#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \
+ (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
+#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
+
+#define IDPF_BASE_TX_DESC(txq, i) \
+ (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
+#define IDPF_BASE_TX_CTX_DESC(txq, i) \
+ (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
+#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \
+ (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
+
+#define IDPF_FLEX_TX_DESC(txq, i) \
+ (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
+#define IDPF_FLEX_TX_CTX_DESC(txq, i) \
+ (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
+
+#define IDPF_DESC_UNUSED(txq) \
+ ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
+ (txq)->next_to_clean - (txq)->next_to_use - 1)
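+
+/* Worked example: desc_count == 512, next_to_clean == 10,
+ * next_to_use == 500 gives 512 + 10 - 500 - 1 == 21 unused descriptors;
+ * the trailing -1 keeps next_to_use from ever catching next_to_clean.
+ */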
+
+#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top)
+#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
+ (txq)->desc_count >> 2)
+
+#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
+/* Determine the absolute number of completions pending, i.e. the number of
+ * completions that are expected to arrive on the TX completion queue. The
+ * ternary guards against the pending counter wrapping ahead of the
+ * completion counter.
+ */
+#define IDPF_TX_COMPLQ_PENDING(txq) \
+ (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
+ 0 : U64_MAX) + \
+ (txq)->num_completions_pending - (txq)->complq->num_completions)
+
+#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
+#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1
+/* Adjust the generation for the completion tag and wrap if necessary */
+#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
+ ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
+ 0 : (txq)->compl_tag_cur_gen)
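+
+/* e.g. with the minimum 64-entry TXQ (see struct idpf_queue below),
+ * compl_tag_gen_max is 1024, so the generation counts 0..1023 before
+ * wrapping back to 0.
+ */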
+
+#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
+
+#define IDPF_TX_FLAGS_TSO BIT(0)
+#define IDPF_TX_FLAGS_IPV4 BIT(1)
+#define IDPF_TX_FLAGS_IPV6 BIT(2)
+#define IDPF_TX_FLAGS_TUNNEL BIT(3)
+
+union idpf_tx_flex_desc {
+ struct idpf_flex_tx_desc q; /* queue based scheduling */
+ struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
+};
+
+/**
+ * struct idpf_tx_buf
+ * @next_to_watch: Next descriptor to clean
+ * @skb: Pointer to the skb
+ * @dma: DMA address
+ * @len: DMA length
+ * @bytecount: Number of bytes
+ * @gso_segs: Number of GSO segments
+ * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
+ * with completion tag returned in buffer completion event.
+ * Because the completion tag is expected to be the same in all
+ * data descriptors for a given packet, and a single packet can
+ * span multiple buffers, we need this field to track all
+ * buffers associated with this completion tag independently of
+ * the buf_id. The tag consists of a N bit buf_id and M upper
+ * order "generation bits". See compl_tag_bufid_m and
+ * compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
+ * to indicate the tag is not valid.
+ * @ctx_entry: Singleq only. Used to indicate the corresponding entry
+ * in the descriptor ring was used for a context descriptor and
+ * this buffer entry should be skipped.
+ */
+struct idpf_tx_buf {
+ void *next_to_watch;
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ unsigned int bytecount;
+ unsigned short gso_segs;
+
+ union {
+ int compl_tag;
+
+ bool ctx_entry;
+ };
+};
+
+struct idpf_tx_stash {
+ struct hlist_node hlist;
+ struct idpf_tx_buf buf;
+};
+
+/**
+ * struct idpf_buf_lifo - LIFO for managing OOO completions
+ * @top: Used to know how many buffers are left
+ * @size: Total size of LIFO
+ * @bufs: Backing array
+ */
+struct idpf_buf_lifo {
+ u16 top;
+ u16 size;
+ struct idpf_tx_stash **bufs;
+};
+
+/**
+ * struct idpf_tx_offload_params - Offload parameters for a given packet
+ * @tx_flags: Feature flags enabled for this packet
+ * @hdr_offsets: Offset parameter for single queue model
+ * @cd_tunneling: Type of tunneling enabled for single queue model
+ * @tso_len: Total length of payload to segment
+ * @mss: Segment size
+ * @tso_segs: Number of segments to be sent
+ * @tso_hdr_len: Length of headers to be duplicated
+ * @td_cmd: Command field to be inserted into descriptor
+ */
+struct idpf_tx_offload_params {
+ u32 tx_flags;
+
+ u32 hdr_offsets;
+ u32 cd_tunneling;
+
+ u32 tso_len;
+ u16 mss;
+ u16 tso_segs;
+ u16 tso_hdr_len;
+
+ u16 td_cmd;
+};
+
+/**
+ * struct idpf_tx_splitq_params
+ * @dtype: General descriptor info
+ * @eop_cmd: Type of EOP
+ * @compl_tag: Associated tag for completion
+ * @td_tag: Descriptor tunneling tag
+ * @offload: Offload parameters
+ */
+struct idpf_tx_splitq_params {
+ enum idpf_tx_desc_dtype_value dtype;
+ u16 eop_cmd;
+ union {
+ u16 compl_tag;
+ u16 td_tag;
+ };
+
+ struct idpf_tx_offload_params offload;
+};
+
+enum idpf_tx_ctx_desc_eipt_offload {
+ IDPF_TX_CTX_EXT_IP_NONE = 0x0,
+ IDPF_TX_CTX_EXT_IP_IPV6 = 0x1,
+ IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+/* Checksum offload bits decoded from the receive descriptor. */
+struct idpf_rx_csum_decoded {
+ u32 l3l4p : 1;
+ u32 ipe : 1;
+ u32 eipe : 1;
+ u32 eudpe : 1;
+ u32 ipv6exadd : 1;
+ u32 l4e : 1;
+ u32 pprs : 1;
+ u32 nat : 1;
+ u32 raw_csum_inv : 1;
+ u32 raw_csum : 16;
+};
+
+struct idpf_rx_extracted {
+ unsigned int size;
+ u16 rx_ptype;
+};
+
+#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
+#define IDPF_TX_MIN_PKT_LEN 17
+#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
+#define IDPF_TX_DESCS_PER_CACHE_LINE (L1_CACHE_BYTES / \
+ sizeof(struct idpf_flex_tx_desc))
+#define IDPF_TX_DESCS_FOR_CTX 1
+/* TX descriptors needed, worst case */
+#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
+ IDPF_TX_DESCS_PER_CACHE_LINE + \
+ IDPF_TX_DESCS_FOR_SKB_DATA_PTR)
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define IDPF_TX_MAX_READ_REQ_SIZE SZ_4K
+#define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
+#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
+ ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
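+
+/* i.e. ALIGN_DOWN(SZ_16K - 1, SZ_4K) == 12288 usable bytes per descriptor */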
+
+#define IDPF_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+#define IDPF_RX_DESC(rxq, i) \
+ (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
+
+struct idpf_rx_buf {
+ struct page *page;
+ unsigned int page_offset;
+ u16 truesize;
+};
+
+#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
+#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
+ (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
+#define IDPF_RX_PTYPE_HDR_SZ sizeof(struct virtchnl2_get_ptype_info)
+#define IDPF_RX_MAX_PTYPES_PER_BUF \
+ DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
+ IDPF_RX_MAX_PTYPE_SZ)
+
+#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)
+
+#define IDPF_TUN_IP_GRE (\
+ IDPF_PTYPE_TUNNEL_IP |\
+ IDPF_PTYPE_TUNNEL_IP_GRENAT)
+
+#define IDPF_TUN_IP_GRE_MAC (\
+ IDPF_TUN_IP_GRE |\
+ IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)
+
+#define IDPF_RX_MAX_PTYPE 1024
+#define IDPF_RX_MAX_BASE_PTYPE 256
+#define IDPF_INVALID_PTYPE_ID 0xFFFF
+
+/* Packet type non-ip values */
+enum idpf_rx_ptype_l2 {
+ IDPF_RX_PTYPE_L2_RESERVED = 0,
+ IDPF_RX_PTYPE_L2_MAC_PAY2 = 1,
+ IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ IDPF_RX_PTYPE_L2_FIP_PAY2 = 3,
+ IDPF_RX_PTYPE_L2_OUI_PAY2 = 4,
+ IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ IDPF_RX_PTYPE_L2_ECP_PAY2 = 7,
+ IDPF_RX_PTYPE_L2_EVB_PAY2 = 8,
+ IDPF_RX_PTYPE_L2_QCN_PAY2 = 9,
+ IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ IDPF_RX_PTYPE_L2_ARP = 11,
+};
+
+enum idpf_rx_ptype_outer_ip {
+ IDPF_RX_PTYPE_OUTER_L2 = 0,
+ IDPF_RX_PTYPE_OUTER_IP = 1,
+};
+
+#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \
+ (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \
+ ((ptype)->outer_ip_ver == (ipv)))
+
+enum idpf_rx_ptype_outer_ip_ver {
+ IDPF_RX_PTYPE_OUTER_NONE = 0,
+ IDPF_RX_PTYPE_OUTER_IPV4 = 1,
+ IDPF_RX_PTYPE_OUTER_IPV6 = 2,
+};
+
+enum idpf_rx_ptype_outer_fragmented {
+ IDPF_RX_PTYPE_NOT_FRAG = 0,
+ IDPF_RX_PTYPE_FRAG = 1,
+};
+
+enum idpf_rx_ptype_tunnel_type {
+ IDPF_RX_PTYPE_TUNNEL_NONE = 0,
+ IDPF_RX_PTYPE_TUNNEL_IP_IP = 1,
+ IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum idpf_rx_ptype_tunnel_end_prot {
+ IDPF_RX_PTYPE_TUNNEL_END_NONE = 0,
+ IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum idpf_rx_ptype_inner_prot {
+ IDPF_RX_PTYPE_INNER_PROT_NONE = 0,
+ IDPF_RX_PTYPE_INNER_PROT_UDP = 1,
+ IDPF_RX_PTYPE_INNER_PROT_TCP = 2,
+ IDPF_RX_PTYPE_INNER_PROT_SCTP = 3,
+ IDPF_RX_PTYPE_INNER_PROT_ICMP = 4,
+ IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
+};
+
+enum idpf_rx_ptype_payload_layer {
+ IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+enum idpf_tunnel_state {
+ IDPF_PTYPE_TUNNEL_IP = BIT(0),
+ IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
+ IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC = BIT(2),
+};
+
+struct idpf_ptype_state {
+ bool outer_ip;
+ bool outer_frag;
+ u8 tunnel_state;
+};
+
+struct idpf_rx_ptype_decoded {
+ u32 ptype:10;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:2;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+/**
+ * enum idpf_queue_flags_t
+ * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
+ * identify new descriptor writebacks on the ring. HW sets
+ * the gen bit to 1 on the first writeback of any given
+ * descriptor. After the ring wraps, HW sets the gen bit of
+ * those descriptors to 0, and continues flipping
+ * 0->1 or 1->0 on each ring wrap. SW maintains its own
+ * gen bit to know what value will indicate writebacks on
+ * the next pass around the ring. E.g. it is initialized
+ * to 1 and knows that reading a gen bit of 1 in any
+ * descriptor on the initial pass of the ring indicates a
+ * writeback. It also flips on every ring wrap.
+ * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit
+ * and RFLQ_GEN is the SW bit.
+ * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
+ * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
+ * @__IDPF_Q_POLL_MODE: Enable poll mode
+ * @__IDPF_Q_FLAGS_NBITS: Must be last
+ */
+enum idpf_queue_flags_t {
+ __IDPF_Q_GEN_CHK,
+ __IDPF_RFLQ_GEN_CHK,
+ __IDPF_Q_FLOW_SCH_EN,
+ __IDPF_Q_SW_MARKER,
+ __IDPF_Q_POLL_MODE,
+
+ __IDPF_Q_FLAGS_NBITS,
+};
+
+/**
+ * struct idpf_vec_regs
+ * @dyn_ctl_reg: Dynamic control interrupt register offset
+ * @itrn_reg: Interrupt Throttling Rate register offset
+ * @itrn_index_spacing: Register spacing between ITR registers of the same
+ * vector
+ */
+struct idpf_vec_regs {
+ u32 dyn_ctl_reg;
+ u32 itrn_reg;
+ u32 itrn_index_spacing;
+};
+
+/**
+ * struct idpf_intr_reg
+ * @dyn_ctl: Dynamic control interrupt register
+ * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
+ * @dyn_ctl_itridx_s: Register bit offset for ITR index
+ * @dyn_ctl_itridx_m: Mask for ITR index
+ * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
+ * @rx_itr: RX ITR register
+ * @tx_itr: TX ITR register
+ * @icr_ena: Interrupt cause register offset
+ * @icr_ena_ctlq_m: Mask for ICR
+ */
+struct idpf_intr_reg {
+ void __iomem *dyn_ctl;
+ u32 dyn_ctl_intena_m;
+ u32 dyn_ctl_itridx_s;
+ u32 dyn_ctl_itridx_m;
+ u32 dyn_ctl_intrvl_s;
+ void __iomem *rx_itr;
+ void __iomem *tx_itr;
+ void __iomem *icr_ena;
+ u32 icr_ena_ctlq_m;
+};
+
+/**
+ * struct idpf_q_vector
+ * @vport: Vport back pointer
+ * @affinity_mask: CPU affinity mask
+ * @napi: napi handler
+ * @v_idx: Vector index
+ * @intr_reg: See struct idpf_intr_reg
+ * @num_txq: Number of TX queues
+ * @tx: Array of TX queues to service
+ * @tx_dim: Data for TX net_dim algorithm
+ * @tx_itr_value: TX interrupt throttling rate
+ * @tx_intr_mode: Dynamic ITR or not
+ * @tx_itr_idx: TX ITR index
+ * @num_rxq: Number of RX queues
+ * @rx: Array of RX queues to service
+ * @rx_dim: Data for RX net_dim algorithm
+ * @rx_itr_value: RX interrupt throttling rate
+ * @rx_intr_mode: Dynamic ITR or not
+ * @rx_itr_idx: RX ITR index
+ * @num_bufq: Number of buffer queues
+ * @bufq: Array of buffer queues to service
+ * @total_events: Number of interrupts processed
+ * @name: Queue vector name
+ */
+struct idpf_q_vector {
+ struct idpf_vport *vport;
+ cpumask_t affinity_mask;
+ struct napi_struct napi;
+ u16 v_idx;
+ struct idpf_intr_reg intr_reg;
+
+ u16 num_txq;
+ struct idpf_queue **tx;
+ struct dim tx_dim;
+ u16 tx_itr_value;
+ bool tx_intr_mode;
+ u32 tx_itr_idx;
+
+ u16 num_rxq;
+ struct idpf_queue **rx;
+ struct dim rx_dim;
+ u16 rx_itr_value;
+ bool rx_intr_mode;
+ u32 rx_itr_idx;
+
+ u16 num_bufq;
+ struct idpf_queue **bufq;
+
+ u16 total_events;
+ char *name;
+};
+
+struct idpf_rx_queue_stats {
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t rsc_pkts;
+ u64_stats_t hw_csum_err;
+ u64_stats_t hsplit_pkts;
+ u64_stats_t hsplit_buf_ovf;
+ u64_stats_t bad_descs;
+};
+
+struct idpf_tx_queue_stats {
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t lso_pkts;
+ u64_stats_t linearize;
+ u64_stats_t q_busy;
+ u64_stats_t skb_drops;
+ u64_stats_t dma_map_errs;
+};
+
+struct idpf_cleaned_stats {
+ u32 packets;
+ u32 bytes;
+};
+
+union idpf_queue_stats {
+ struct idpf_rx_queue_stats rx;
+ struct idpf_tx_queue_stats tx;
+};
+
+#define IDPF_ITR_DYNAMIC 1
+#define IDPF_ITR_MAX 0x1FE0
+#define IDPF_ITR_20K 0x0032
+#define IDPF_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
+#define IDPF_ITR_MASK 0x1FFE /* ITR register value alignment mask */
+#define ITR_REG_ALIGN(setting) ((setting) & IDPF_ITR_MASK)
+#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
+#define IDPF_ITR_TX_DEF IDPF_ITR_20K
+#define IDPF_ITR_RX_DEF IDPF_ITR_20K
+/* Index used for 'No ITR' update in DYN_CTL register */
+#define IDPF_NO_ITR_UPDATE_IDX 3
+#define IDPF_ITR_IDX_SPACING(spacing, dflt) ((spacing) ? (spacing) : (dflt))
+#define IDPF_DIM_DEFAULT_PROFILE_IX 1
+
+/**
+ * struct idpf_queue
+ * @dev: Device back pointer for DMA mapping
+ * @vport: Back pointer to associated vport
+ * @txq_grp: See struct idpf_txq_group
+ * @rxq_grp: See struct idpf_rxq_group
+ * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
+ * buffer queue uses this index to determine which group of refill queues
+ * to clean.
+ * For TX queue, it is used as index to map between TX queue group and
+ * hot path TX pointers stored in vport. Used in both singleq/splitq.
+ * For RX queue, it is used to index to total RX queue across groups and
+ * used for skb reporting.
+ * @tail: Tail offset. Used for both queue models single and split. In splitq
+ * model relevant only for TX queue and RX queue.
+ * @tx_buf: See struct idpf_tx_buf
+ * @rx_buf: Struct with RX buffer related members
+ * @rx_buf.buf: See struct idpf_rx_buf
+ * @rx_buf.hdr_buf_pa: DMA handle
+ * @rx_buf.hdr_buf_va: Virtual address
+ * @pp: Page pool pointer
+ * @skb: Pointer to the skb
+ * @q_type: Queue type (TX, RX, TX completion, RX buffer)
+ * @q_id: Queue id
+ * @desc_count: Number of descriptors
+ * @next_to_use: Next descriptor to use. Relevant in both split & single txq
+ * and bufq.
+ * @next_to_clean: Next descriptor to clean. In split queue model, only
+ * relevant to TX completion queue and RX queue.
+ * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
+ * only relevant to RX queue.
+ * @flags: See enum idpf_queue_flags_t
+ * @q_stats: See union idpf_queue_stats
+ * @stats_sync: See struct u64_stats_sync
+ * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
+ * the TX completion queue, it can be for any TXQ associated
+ * with that completion queue. This means we can clean up to
+ * N TXQs during a single call to clean the completion queue.
+ * cleaned_bytes|pkts tracks the clean stats per TXQ during
+ * that single call to clean the completion queue. By doing so,
+ * we can update BQL with aggregate cleaned stats for each TXQ
+ * only once at the end of the cleaning routine.
+ * @cleaned_pkts: Number of packets cleaned for the above said case
+ * @rx_hsplit_en: RX headsplit enable
+ * @rx_hbuf_size: Header buffer size
+ * @rx_buf_size: Buffer size
+ * @rx_max_pkt_size: RX max packet size
+ * @rx_buf_stride: RX buffer stride
+ * @rx_buffer_low_watermark: RX buffer low watermark
+ * @rxdids: Supported RX descriptor ids
+ * @q_vector: Backreference to associated vector
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @desc_ring: Descriptor ring memory
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+ * @tx_min_pkt_len: Min supported packet length
+ * @num_completions: Only relevant for TX completion queue. It tracks the
+ * number of completions received to compare against the
+ * number of completions pending, as accumulated by the
+ * TX queues.
+ * @buf_stack: Stack of empty buffers to store buffer info for out of order
+ * buffer completions. See struct idpf_buf_lifo.
+ * @compl_tag_bufid_m: Completion tag buffer id mask
+ * @compl_tag_gen_s: Completion tag generation bit
+ * The format of the completion tag will change based on the TXQ
+ * descriptor ring size so that we can maintain roughly the same level
+ * of "uniqueness" across all descriptor sizes. For example, if the
+ * TXQ descriptor ring size is 64 (the minimum size supported), the
+ * completion tag will be formatted as below:
+ * 15 6 5 0
+ * --------------------------------
+ * | GEN=0-1023 |IDX = 0-63|
+ * --------------------------------
+ *
+ * This gives us 64*1024 = 65536 possible unique values. Similarly, if
+ * the TXQ descriptor ring size is 8160 (the maximum size supported),
+ * the completion tag will be formatted as below:
+ * 15 13 12 0
+ * --------------------------------
+ * |GEN | IDX = 0-8159 |
+ * --------------------------------
+ *
+ * This gives us 8*8160 = 65280 possible unique values.
+ * @compl_tag_cur_gen: Used to keep track of current completion tag generation
+ * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+ * @sched_buf_hash: Hash table to store buffers
+ */
+struct idpf_queue {
+ struct device *dev;
+ struct idpf_vport *vport;
+ union {
+ struct idpf_txq_group *txq_grp;
+ struct idpf_rxq_group *rxq_grp;
+ };
+ u16 idx;
+ void __iomem *tail;
+ union {
+ struct idpf_tx_buf *tx_buf;
+ struct {
+ struct idpf_rx_buf *buf;
+ dma_addr_t hdr_buf_pa;
+ void *hdr_buf_va;
+ } rx_buf;
+ };
+ struct page_pool *pp;
+ struct sk_buff *skb;
+ u16 q_type;
+ u32 q_id;
+ u16 desc_count;
+
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+
+ union idpf_queue_stats q_stats;
+ struct u64_stats_sync stats_sync;
+
+ u32 cleaned_bytes;
+ u16 cleaned_pkts;
+
+ bool rx_hsplit_en;
+ u16 rx_hbuf_size;
+ u16 rx_buf_size;
+ u16 rx_max_pkt_size;
+ u16 rx_buf_stride;
+ u8 rx_buffer_low_watermark;
+ u64 rxdids;
+ struct idpf_q_vector *q_vector;
+ unsigned int size;
+ dma_addr_t dma;
+ void *desc_ring;
+
+ u16 tx_max_bufs;
+ u8 tx_min_pkt_len;
+
+ u32 num_completions;
+
+ struct idpf_buf_lifo buf_stack;
+
+ u16 compl_tag_bufid_m;
+ u16 compl_tag_gen_s;
+
+ u16 compl_tag_cur_gen;
+ u16 compl_tag_gen_max;
+
+ DECLARE_HASHTABLE(sched_buf_hash, 12);
+} ____cacheline_internodealigned_in_smp;
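+
+/* A sketch of how a completion tag can be composed from the fields above
+ * (buf_id is a hypothetical local; the shift and mask are per-queue):
+ *
+ * u16 compl_tag = (txq->compl_tag_cur_gen << txq->compl_tag_gen_s) |
+ * (buf_id & txq->compl_tag_bufid_m);
+ */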
+
+/**
+ * struct idpf_sw_queue
+ * @next_to_clean: Next descriptor to clean
+ * @next_to_alloc: Buffer to allocate at
+ * @flags: See enum idpf_queue_flags_t
+ * @ring: Pointer to the ring
+ * @desc_count: Descriptor count
+ * @dev: Device back pointer for DMA mapping
+ *
+ * Software queues are used in splitq mode to manage buffers between rxq
+ * producer and the bufq consumer. These are required in order to maintain a
+ * lockless buffer management system and are strictly software only constructs.
+ */
+struct idpf_sw_queue {
+ u16 next_to_clean;
+ u16 next_to_alloc;
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u16 *ring;
+ u16 desc_count;
+ struct device *dev;
+} ____cacheline_internodealigned_in_smp;
+
+/**
+ * struct idpf_rxq_set
+ * @rxq: RX queue
+ * @refillq0: Pointer to refill queue 0
+ * @refillq1: Pointer to refill queue 1
+ *
+ * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
+ * Each rxq needs a refillq to return used buffers back to the respective bufq.
+ * Bufqs then clean these refillqs for buffers to give to hardware.
+ */
+struct idpf_rxq_set {
+ struct idpf_queue rxq;
+ struct idpf_sw_queue *refillq0;
+ struct idpf_sw_queue *refillq1;
+};
+
+/**
+ * struct idpf_bufq_set
+ * @bufq: Buffer queue
+ * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
+ * in idpf_rxq_group.
+ * @refillqs: Pointer to refill queues array.
+ *
+ * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
+ * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
+ * Used buffers received by rxqs will be put on refillqs which bufqs will
+ * clean to return new buffers back to hardware.
+ *
+ * Buffers needed by some number of rxqs associated in this rxq_group are
+ * managed by at most two bufqs (depending on performance configuration).
+ */
+struct idpf_bufq_set {
+ struct idpf_queue bufq;
+ int num_refillqs;
+ struct idpf_sw_queue *refillqs;
+};
+
+/**
+ * struct idpf_rxq_group
+ * @vport: Vport back pointer
+ * @singleq: Struct with single queue related members
+ * @singleq.num_rxq: Number of RX queues associated
+ * @singleq.rxqs: Array of RX queue pointers
+ * @splitq: Struct with split queue related members
+ * @splitq.num_rxq_sets: Number of RX queue sets
+ * @splitq.rxq_sets: Array of RX queue sets
+ * @splitq.bufq_sets: Buffer queue set pointer
+ *
+ * In singleq mode, an rxq_group is simply an array of rxqs. In splitq, a
+ * rxq_group contains all the rxqs, bufqs and refillqs needed to
+ * manage buffers in splitq mode.
+ */
+struct idpf_rxq_group {
+ struct idpf_vport *vport;
+
+ union {
+ struct {
+ u16 num_rxq;
+ struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q];
+ } singleq;
+ struct {
+ u16 num_rxq_sets;
+ struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
+ struct idpf_bufq_set *bufq_sets;
+ } splitq;
+ };
+};
+
+/**
+ * struct idpf_txq_group
+ * @vport: Vport back pointer
+ * @num_txq: Number of TX queues associated
+ * @txqs: Array of TX queue pointers
+ * @complq: Associated completion queue pointer, split queue only
+ * @num_completions_pending: Total number of completions pending for the
+ * completion queue, accumulated for all TX queues
+ * associated with that completion queue.
+ *
+ * Between singleq and splitq, a txq_group is largely the same except for the
+ * complq. In splitq a single complq is responsible for handling completions
+ * for some number of txqs associated in this txq_group.
+ */
+struct idpf_txq_group {
+ struct idpf_vport *vport;
+
+ u16 num_txq;
+ struct idpf_queue *txqs[IDPF_LARGE_MAX_Q];
+
+ struct idpf_queue *complq;
+
+ u32 num_completions_pending;
+};
+
+/**
+ * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
+ * @size: transmit request size in bytes
+ *
+ * In the case where a large frag (>= 16K) needs to be split across multiple
+ * descriptors, we need to assume that we can have no more than 12K of data
+ * per descriptor due to hardware alignment restrictions (4K alignment).
+ */
+static inline u32 idpf_size_to_txd_count(unsigned int size)
+{
+ return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
+}
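+
+/* e.g. a 32K fragment: idpf_size_to_txd_count(SZ_32K) ==
+ * DIV_ROUND_UP(32768, 12288) == 3 descriptors.
+ */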
+
+/**
+ * idpf_tx_singleq_build_ctob - populate command tag offset and size
+ * @td_cmd: Command to be filled in desc
+ * @td_offset: Offset to be filled in desc
+ * @size: Size of the buffer
+ * @td_tag: td tag to be filled
+ *
+ * Returns the 64 bit value populated with the input parameters
+ */
+static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
+ unsigned int size, u64 td_tag)
+{
+ return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
+ (td_cmd << IDPF_TXD_QW1_CMD_S) |
+ (td_offset << IDPF_TXD_QW1_OFFSET_S) |
+ ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
+ (td_tag << IDPF_TXD_QW1_L2TAG1_S));
+}
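+
+/* Usage sketch for a packet's last data descriptor (qw1 is assumed to be
+ * the base descriptor's second quadword; td_offset and size are
+ * hypothetical locals):
+ *
+ * txd->qw1 = idpf_tx_singleq_build_ctob(IDPF_TXD_LAST_DESC_CMD,
+ * td_offset, size, 0);
+ */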
+
+void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
+ struct idpf_tx_splitq_params *params,
+ u16 td_cmd, u16 size);
+void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
+ struct idpf_tx_splitq_params *params,
+ u16 td_cmd, u16 size);
+/**
+ * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
+ * @desc: descriptor to populate
+ * @params: pointer to tx params struct
+ * @td_cmd: command to be filled in desc
+ * @size: size of buffer
+ */
+static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
+ struct idpf_tx_splitq_params *params,
+ u16 td_cmd, u16 size)
+{
+ if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
+ idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
+ else
+ idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
+}
+
+/**
+ * idpf_alloc_page - Allocate a new RX buffer from the page pool
+ * @pool: page_pool to allocate from
+ * @buf: metadata struct to populate with page info
+ * @buf_size: 2K or 4K
+ *
+ * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise.
+ */
+static inline dma_addr_t idpf_alloc_page(struct page_pool *pool,
+ struct idpf_rx_buf *buf,
+ unsigned int buf_size)
+{
+ if (buf_size == IDPF_RX_BUF_2048)
+ buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
+ buf_size);
+ else
+ buf->page = page_pool_dev_alloc_pages(pool);
+
+ if (!buf->page)
+ return DMA_MAPPING_ERROR;
+
+ buf->truesize = buf_size;
+
+ return page_pool_get_dma_addr(buf->page) + buf->page_offset +
+ pool->p.offset;
+}
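+
+/* Used e.g. in the bufq refill path in idpf_txrx.c:
+ *
+ * addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
+ * if (unlikely(addr == DMA_MAPPING_ERROR))
+ * return -ENOMEM;
+ */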
+
+/**
+ * idpf_rx_put_page - Return RX buffer page to pool
+ * @rx_buf: RX buffer metadata struct
+ */
+static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf)
+{
+ page_pool_put_page(rx_buf->page->pp, rx_buf->page,
+ rx_buf->truesize, true);
+ rx_buf->page = NULL;
+}
+
+/**
+ * idpf_rx_sync_for_cpu - Synchronize DMA buffer
+ * @rx_buf: RX buffer metadata struct
+ * @len: frame length from descriptor
+ */
+static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
+{
+ struct page *page = rx_buf->page;
+ struct page_pool *pp = page->pp;
+
+ dma_sync_single_range_for_cpu(pp->p.dev,
+ page_pool_get_dma_addr(page),
+ rx_buf->page_offset + pp->p.offset, len,
+ page_pool_get_dma_dir(pp));
+}
+
+int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
+void idpf_vport_init_num_qs(struct idpf_vport *vport,
+ struct virtchnl2_create_vport *vport_msg);
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
+int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_vport_max_q *max_q);
+void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
+int idpf_vport_queues_alloc(struct idpf_vport *vport);
+void idpf_vport_queues_rel(struct idpf_vport *vport);
+void idpf_vport_intr_rel(struct idpf_vport *vport);
+int idpf_vport_intr_alloc(struct idpf_vport *vport);
+void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
+void idpf_vport_intr_deinit(struct idpf_vport *vport);
+int idpf_vport_intr_init(struct idpf_vport *vport);
+enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
+int idpf_config_rss(struct idpf_vport *vport);
+int idpf_init_rss(struct idpf_vport *vport);
+void idpf_deinit_rss(struct idpf_vport *vport);
+int idpf_rx_bufs_init_all(struct idpf_vport *vport);
+void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
+ unsigned int size);
+struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
+ struct idpf_rx_buf *rx_buf,
+ unsigned int size);
+bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
+void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
+void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+ bool xmit_more);
+netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb);
+void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
+ struct idpf_tx_buf *first, u16 ring_idx);
+unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
+ struct sk_buff *skb);
+bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+ unsigned int count);
+int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size);
+void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
+ struct net_device *netdev);
+netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
+ struct net_device *netdev);
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
+ u16 cleaned_count);
+int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+
+#endif /* !_IDPF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
new file mode 100644
index 000000000000..8ade4e3a9fe1
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_lan_vf_regs.h"
+
+#define IDPF_VF_ITR_IDX_SPACING 0x40
+
+/**
+ * idpf_vf_ctlq_reg_init - initialize default mailbox registers
+ * @cq: pointer to the array of create control queues
+ */
+static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+{
+ int i;
+
+ for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
+ struct idpf_ctlq_create_info *ccq = cq + i;
+
+ switch (ccq->type) {
+ case IDPF_CTLQ_TYPE_MAILBOX_TX:
+ /* set head and tail registers in our local struct */
+ ccq->reg.head = VF_ATQH;
+ ccq->reg.tail = VF_ATQT;
+ ccq->reg.len = VF_ATQLEN;
+ ccq->reg.bah = VF_ATQBAH;
+ ccq->reg.bal = VF_ATQBAL;
+ ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M;
+ ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;
+ ccq->reg.head_mask = VF_ATQH_ATQH_M;
+ break;
+ case IDPF_CTLQ_TYPE_MAILBOX_RX:
+ /* set head and tail registers in our local struct */
+ ccq->reg.head = VF_ARQH;
+ ccq->reg.tail = VF_ARQT;
+ ccq->reg.len = VF_ARQLEN;
+ ccq->reg.bah = VF_ARQBAH;
+ ccq->reg.bal = VF_ARQBAL;
+ ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M;
+ ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;
+ ccq->reg.head_mask = VF_ARQH_ARQH_M;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ * idpf_vf_mb_intr_reg_init - Initialize the mailbox register
+ * @adapter: adapter structure
+ */
+static void idpf_vf_mb_intr_reg_init(struct idpf_adapter *adapter)
+{
+ struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
+ u32 dyn_ctl = le32_to_cpu(adapter->caps.mailbox_dyn_ctl);
+
+ intr->dyn_ctl = idpf_get_reg_addr(adapter, dyn_ctl);
+ intr->dyn_ctl_intena_m = VF_INT_DYN_CTL0_INTENA_M;
+ intr->dyn_ctl_itridx_m = VF_INT_DYN_CTL0_ITR_INDX_M;
+ intr->icr_ena = idpf_get_reg_addr(adapter, VF_INT_ICR0_ENA1);
+ intr->icr_ena_ctlq_m = VF_INT_ICR0_ENA1_ADMINQ_M;
+}
+
+/**
+ * idpf_vf_intr_reg_init - Initialize interrupt registers
+ * @vport: virtual port structure
+ */
+static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ int num_vecs = vport->num_q_vectors;
+ struct idpf_vec_regs *reg_vals;
+ int num_regs, i, err = 0;
+ u32 rx_itr, tx_itr;
+ u16 total_vecs;
+
+ total_vecs = idpf_get_reserved_vecs(vport->adapter);
+ reg_vals = kcalloc(total_vecs, sizeof(struct idpf_vec_regs),
+ GFP_KERNEL);
+ if (!reg_vals)
+ return -ENOMEM;
+
+ num_regs = idpf_get_reg_intr_vecs(vport, reg_vals);
+ if (num_regs < num_vecs) {
+ err = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ for (i = 0; i < num_vecs; i++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[i];
+ u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_intr_reg *intr = &q_vector->intr_reg;
+ u32 spacing;
+
+ intr->dyn_ctl = idpf_get_reg_addr(adapter,
+ reg_vals[vec_id].dyn_ctl_reg);
+ intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M;
+ intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
+
+ spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
+ IDPF_VF_ITR_IDX_SPACING);
+ rx_itr = VF_INT_ITRN_ADDR(VIRTCHNL2_ITR_IDX_0,
+ reg_vals[vec_id].itrn_reg,
+ spacing);
+ tx_itr = VF_INT_ITRN_ADDR(VIRTCHNL2_ITR_IDX_1,
+ reg_vals[vec_id].itrn_reg,
+ spacing);
+ intr->rx_itr = idpf_get_reg_addr(adapter, rx_itr);
+ intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
+ }
+
+free_reg_vals:
+ kfree(reg_vals);
+
+ return err;
+}
+
+/**
+ * idpf_vf_reset_reg_init - Initialize reset registers
+ * @adapter: Driver specific private structure
+ */
+static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter)
+{
+ adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT);
+ adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M;
+}
+
+/**
+ * idpf_vf_trigger_reset - trigger reset
+ * @adapter: Driver specific private structure
+ * @trig_cause: Reason to trigger a reset
+ */
+static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
+ enum idpf_flags trig_cause)
+{
+ /* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
+ if (trig_cause == IDPF_HR_FUNC_RESET &&
+ !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL);
+}
+
+/**
+ * idpf_vf_reg_ops_init - Initialize register API function pointers
+ * @adapter: Driver specific private structure
+ */
+static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter)
+{
+ adapter->dev_ops.reg_ops.ctlq_reg_init = idpf_vf_ctlq_reg_init;
+ adapter->dev_ops.reg_ops.intr_reg_init = idpf_vf_intr_reg_init;
+ adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_vf_mb_intr_reg_init;
+ adapter->dev_ops.reg_ops.reset_reg_init = idpf_vf_reset_reg_init;
+ adapter->dev_ops.reg_ops.trigger_reset = idpf_vf_trigger_reset;
+}
+
+/**
+ * idpf_vf_dev_ops_init - Initialize device API function pointers
+ * @adapter: Driver specific private structure
+ */
+void idpf_vf_dev_ops_init(struct idpf_adapter *adapter)
+{
+ idpf_vf_reg_ops_init(adapter);
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
new file mode 100644
index 000000000000..9bc85b2f1709
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -0,0 +1,3791 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+
+/**
+ * idpf_recv_event_msg - Receive virtchnl event message
+ * @vport: virtual port structure
+ * @ctlq_msg: message to copy from
+ *
+ * Receive virtchnl event message
+ */
+static void idpf_recv_event_msg(struct idpf_vport *vport,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct virtchnl2_event *v2e;
+ bool link_status;
+ u32 event;
+
+ v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
+ event = le32_to_cpu(v2e->event);
+
+ switch (event) {
+ case VIRTCHNL2_EVENT_LINK_CHANGE:
+ vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+ link_status = v2e->link_status;
+
+ if (vport->link_up == link_status)
+ break;
+
+ vport->link_up = link_status;
+ if (np->state == __IDPF_VPORT_UP) {
+ if (vport->link_up) {
+ netif_carrier_on(vport->netdev);
+ netif_tx_start_all_queues(vport->netdev);
+ } else {
+ netif_tx_stop_all_queues(vport->netdev);
+ netif_carrier_off(vport->netdev);
+ }
+ }
+ break;
+ default:
+ dev_err(&vport->adapter->pdev->dev,
+ "Unknown event %d from PF\n", event);
+ break;
+ }
+}
+
+/**
+ * idpf_mb_clean - Reclaim the send mailbox queue entries
+ * @adapter: Driver specific private structure
+ *
+ * Reclaim the send mailbox queue entries to be used to send further messages
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_mb_clean(struct idpf_adapter *adapter)
+{
+ u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
+ struct idpf_ctlq_msg **q_msg;
+ struct idpf_dma_mem *dma_mem;
+ int err;
+
+ q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
+ if (!q_msg)
+ return -ENOMEM;
+
+ err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
+ if (err)
+ goto err_kfree;
+
+ for (i = 0; i < num_q_msg; i++) {
+ if (!q_msg[i])
+ continue;
+ dma_mem = q_msg[i]->ctx.indirect.payload;
+ if (dma_mem)
+ dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
+ dma_mem->va, dma_mem->pa);
+ kfree(q_msg[i]);
+ kfree(dma_mem);
+ }
+
+err_kfree:
+ kfree(q_msg);
+
+ return err;
+}
+
+/**
+ * idpf_send_mb_msg - Send message over mailbox
+ * @adapter: Driver specific private structure
+ * @op: virtchnl opcode
+ * @msg_size: size of the payload
+ * @msg: pointer to buffer holding the payload
+ *
+ * Prepares the control queue message and initiates the send API
+ *
+ * Returns 0 on success, negative on failure
+ */
+int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ u16 msg_size, u8 *msg)
+{
+ struct idpf_ctlq_msg *ctlq_msg;
+ struct idpf_dma_mem *dma_mem;
+ int err;
+
+ /* If we are here and a reset is detected, nothing much can be
+ * done. This thread should silently abort and is expected to
+ * be corrected with a new run, either by user or driver
+ * flows, after the reset
+ */
+ if (idpf_is_reset_detected(adapter))
+ return 0;
+
+ err = idpf_mb_clean(adapter);
+ if (err)
+ return err;
+
+ ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
+ if (!ctlq_msg)
+ return -ENOMEM;
+
+ dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
+ if (!dma_mem) {
+ err = -ENOMEM;
+ goto dma_mem_error;
+ }
+
+ ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
+ ctlq_msg->func_id = 0;
+ ctlq_msg->data_len = msg_size;
+ ctlq_msg->cookie.mbx.chnl_opcode = op;
+ ctlq_msg->cookie.mbx.chnl_retval = 0;
+ dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
+ dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
+ &dma_mem->pa, GFP_ATOMIC);
+ if (!dma_mem->va) {
+ err = -ENOMEM;
+ goto dma_alloc_error;
+ }
+ memcpy(dma_mem->va, msg, msg_size);
+ ctlq_msg->ctx.indirect.payload = dma_mem;
+
+ err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
+ if (err)
+ goto send_error;
+
+ return 0;
+
+send_error:
+ dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
+ dma_mem->pa);
+dma_alloc_error:
+ kfree(dma_mem);
+dma_mem_error:
+ kfree(ctlq_msg);
+
+ return err;
+}
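+
+/* Usage sketch, mirroring callers later in this file such as
+ * idpf_send_destroy_vport_msg(): build a virtchnl2 struct and hand it in;
+ * the payload is copied into a freshly allocated DMA buffer, so the
+ * caller's copy may go out of scope once this returns.
+ *
+ *	struct virtchnl2_vport v_id = {
+ *		.vport_id = cpu_to_le32(vport->vport_id),
+ *	};
+ *
+ *	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
+ *			       sizeof(v_id), (u8 *)&v_id);
+ */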
+
+/**
+ * idpf_find_vport - Find vport pointer from control queue message
+ * @adapter: driver specific private structure
+ * @vport: address of the vport pointer, filled from the adapter's vport list
+ * @ctlq_msg: control queue message
+ *
+ * Return 0 on success, error value on failure. Also checks the opcodes that
+ * expect to receive a payload and returns an error value if none was
+ * received.
+ */
+static int idpf_find_vport(struct idpf_adapter *adapter,
+ struct idpf_vport **vport,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ bool no_op = false, vid_found = false;
+ int i, err = 0;
+ char *vc_msg;
+ u32 v_id;
+
+ vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL);
+ if (!vc_msg)
+ return -ENOMEM;
+
+ if (ctlq_msg->data_len) {
+ size_t payload_size = ctlq_msg->ctx.indirect.payload->size;
+
+ if (!payload_size) {
+ dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n");
+ kfree(vc_msg);
+
+ return -EINVAL;
+ }
+
+ memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va,
+ min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN));
+ }
+
+ switch (ctlq_msg->cookie.mbx.chnl_opcode) {
+ case VIRTCHNL2_OP_VERSION:
+ case VIRTCHNL2_OP_GET_CAPS:
+ case VIRTCHNL2_OP_CREATE_VPORT:
+ case VIRTCHNL2_OP_SET_SRIOV_VFS:
+ case VIRTCHNL2_OP_ALLOC_VECTORS:
+ case VIRTCHNL2_OP_DEALLOC_VECTORS:
+ case VIRTCHNL2_OP_GET_PTYPE_INFO:
+ goto free_vc_msg;
+ case VIRTCHNL2_OP_ENABLE_VPORT:
+ case VIRTCHNL2_OP_DISABLE_VPORT:
+ case VIRTCHNL2_OP_DESTROY_VPORT:
+ v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
+ v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
+ v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_ENABLE_QUEUES:
+ case VIRTCHNL2_OP_DISABLE_QUEUES:
+ case VIRTCHNL2_OP_DEL_QUEUES:
+ v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_ADD_QUEUES:
+ v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
+ case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
+ v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_GET_STATS:
+ v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_GET_RSS_LUT:
+ case VIRTCHNL2_OP_SET_RSS_LUT:
+ v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_GET_RSS_KEY:
+ case VIRTCHNL2_OP_SET_RSS_KEY:
+ v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_EVENT:
+ v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_LOOPBACK:
+ v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
+ v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_ADD_MAC_ADDR:
+ case VIRTCHNL2_OP_DEL_MAC_ADDR:
+ v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id);
+ break;
+ default:
+ no_op = true;
+ break;
+ }
+
+ if (no_op)
+ goto free_vc_msg;
+
+ for (i = 0; i < idpf_get_max_vports(adapter); i++) {
+ if (adapter->vport_ids[i] == v_id) {
+ vid_found = true;
+ break;
+ }
+ }
+
+ if (vid_found)
+ *vport = adapter->vports[i];
+ else
+ err = -EINVAL;
+
+free_vc_msg:
+ kfree(vc_msg);
+
+ return err;
+}
+
+/**
+ * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer.
+ * @adapter: driver specific private structure
+ * @vport: virtual port structure
+ * @ctlq_msg: msg to copy from
+ * @err_enum: err bit to set on error
+ *
+ * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success,
+ * negative on failure.
+ */
+static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter,
+ struct idpf_vport *vport,
+ struct idpf_ctlq_msg *ctlq_msg,
+ enum idpf_vport_vc_state err_enum)
+{
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ if (vport)
+ set_bit(err_enum, vport->vc_state);
+ else
+ set_bit(err_enum, adapter->vc_state);
+
+ return -EINVAL;
+ }
+
+ if (vport)
+ memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va,
+ min_t(int, ctlq_msg->ctx.indirect.payload->size,
+ IDPF_CTLQ_MAX_BUF_LEN));
+ else
+ memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va,
+ min_t(int, ctlq_msg->ctx.indirect.payload->size,
+ IDPF_CTLQ_MAX_BUF_LEN));
+
+ return 0;
+}
+
+/**
+ * idpf_recv_vchnl_op - helper function with common logic when handling the
+ * reception of VIRTCHNL OPs.
+ * @adapter: driver specific private structure
+ * @vport: virtual port structure
+ * @ctlq_msg: msg to copy from
+ * @state: state bit used on timeout check
+ * @err_state: err bit to set on error
+ */
+static void idpf_recv_vchnl_op(struct idpf_adapter *adapter,
+ struct idpf_vport *vport,
+ struct idpf_ctlq_msg *ctlq_msg,
+ enum idpf_vport_vc_state state,
+ enum idpf_vport_vc_state err_state)
+{
+ wait_queue_head_t *vchnl_wq;
+ int err;
+
+ if (vport)
+ vchnl_wq = &vport->vchnl_wq;
+ else
+ vchnl_wq = &adapter->vchnl_wq;
+
+ err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state);
+ if (wq_has_sleeper(vchnl_wq)) {
+ if (vport)
+ set_bit(state, vport->vc_state);
+ else
+ set_bit(state, adapter->vc_state);
+
+ wake_up(vchnl_wq);
+ } else {
+ if (!err) {
+ dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ } else {
+ /* Clear the errors since there is no sleeper to pass
+ * them on
+ */
+ if (vport)
+ clear_bit(err_state, vport->vc_state);
+ else
+ clear_bit(err_state, adapter->vc_state);
+ }
+ }
+}
+
+/**
+ * idpf_recv_mb_msg - Receive message over mailbox
+ * @adapter: Driver specific private structure
+ * @op: virtchannel operation code
+ * @msg: Received message holding buffer
+ * @msg_size: message size
+ *
+ * Receives the control queue message and posts the receive buffer. Returns 0
+ * on success and negative on failure.
+ */
+int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
+ void *msg, int msg_size)
+{
+ struct idpf_vport *vport = NULL;
+ struct idpf_ctlq_msg ctlq_msg;
+ struct idpf_dma_mem *dma_mem;
+ bool work_done = false;
+ int num_retry = 2000;
+ u16 num_q_msg;
+ int err;
+
+ while (1) {
+ struct idpf_vport_config *vport_config;
+ int payload_size = 0;
+
+ /* Try to get one message */
+ num_q_msg = 1;
+ dma_mem = NULL;
+ err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg);
+ /* If no message then decide if we have to retry based on
+ * opcode
+ */
+ if (err || !num_q_msg) {
+ /* num_retry is large to account for responses delayed by
+ * a large number of VF mailbox messages (up to ~40 s here,
+ * with 2000 retries of 20 ms each). If a mailbox message
+ * is received from the other side, we come out of the
+ * sleep cycle immediately; otherwise we keep waiting.
+ */
+ if (!op || !num_retry--)
+ break;
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
+ err = -EIO;
+ break;
+ }
+ msleep(20);
+ continue;
+ }
+
+ /* A message was received. If we are looking for a specific
+ * opcode and this one differs, ignore it and post buffers
+ */
+ if (op && ctlq_msg.cookie.mbx.chnl_opcode != op)
+ goto post_buffs;
+
+ err = idpf_find_vport(adapter, &vport, &ctlq_msg);
+ if (err)
+ goto post_buffs;
+
+ if (ctlq_msg.data_len)
+ payload_size = ctlq_msg.ctx.indirect.payload->size;
+
+ /* All conditions are met. Either the requested message was
+ * received or we got a message that needs processing
+ */
+ switch (ctlq_msg.cookie.mbx.chnl_opcode) {
+ case VIRTCHNL2_OP_VERSION:
+ case VIRTCHNL2_OP_GET_CAPS:
+ if (ctlq_msg.cookie.mbx.chnl_retval) {
+ dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n",
+ ctlq_msg.cookie.mbx.chnl_opcode,
+ ctlq_msg.cookie.mbx.chnl_retval);
+ err = -EBADMSG;
+ } else if (msg) {
+ memcpy(msg, ctlq_msg.ctx.indirect.payload->va,
+ min_t(int, payload_size, msg_size));
+ }
+ work_done = true;
+ break;
+ case VIRTCHNL2_OP_CREATE_VPORT:
+ idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
+ IDPF_VC_CREATE_VPORT,
+ IDPF_VC_CREATE_VPORT_ERR);
+ break;
+ case VIRTCHNL2_OP_ENABLE_VPORT:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_ENA_VPORT,
+ IDPF_VC_ENA_VPORT_ERR);
+ break;
+ case VIRTCHNL2_OP_DISABLE_VPORT:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_DIS_VPORT,
+ IDPF_VC_DIS_VPORT_ERR);
+ break;
+ case VIRTCHNL2_OP_DESTROY_VPORT:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_DESTROY_VPORT,
+ IDPF_VC_DESTROY_VPORT_ERR);
+ break;
+ case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_CONFIG_TXQ,
+ IDPF_VC_CONFIG_TXQ_ERR);
+ break;
+ case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_CONFIG_RXQ,
+ IDPF_VC_CONFIG_RXQ_ERR);
+ break;
+ case VIRTCHNL2_OP_ENABLE_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_ENA_QUEUES,
+ IDPF_VC_ENA_QUEUES_ERR);
+ break;
+ case VIRTCHNL2_OP_DISABLE_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_DIS_QUEUES,
+ IDPF_VC_DIS_QUEUES_ERR);
+ break;
+ case VIRTCHNL2_OP_ADD_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_ADD_QUEUES,
+ IDPF_VC_ADD_QUEUES_ERR);
+ break;
+ case VIRTCHNL2_OP_DEL_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_DEL_QUEUES,
+ IDPF_VC_DEL_QUEUES_ERR);
+ break;
+ case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_MAP_IRQ,
+ IDPF_VC_MAP_IRQ_ERR);
+ break;
+ case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_UNMAP_IRQ,
+ IDPF_VC_UNMAP_IRQ_ERR);
+ break;
+ case VIRTCHNL2_OP_GET_STATS:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_GET_STATS,
+ IDPF_VC_GET_STATS_ERR);
+ break;
+ case VIRTCHNL2_OP_GET_RSS_LUT:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_GET_RSS_LUT,
+ IDPF_VC_GET_RSS_LUT_ERR);
+ break;
+ case VIRTCHNL2_OP_SET_RSS_LUT:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_SET_RSS_LUT,
+ IDPF_VC_SET_RSS_LUT_ERR);
+ break;
+ case VIRTCHNL2_OP_GET_RSS_KEY:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_GET_RSS_KEY,
+ IDPF_VC_GET_RSS_KEY_ERR);
+ break;
+ case VIRTCHNL2_OP_SET_RSS_KEY:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_SET_RSS_KEY,
+ IDPF_VC_SET_RSS_KEY_ERR);
+ break;
+ case VIRTCHNL2_OP_SET_SRIOV_VFS:
+ idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
+ IDPF_VC_SET_SRIOV_VFS,
+ IDPF_VC_SET_SRIOV_VFS_ERR);
+ break;
+ case VIRTCHNL2_OP_ALLOC_VECTORS:
+ idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
+ IDPF_VC_ALLOC_VECTORS,
+ IDPF_VC_ALLOC_VECTORS_ERR);
+ break;
+ case VIRTCHNL2_OP_DEALLOC_VECTORS:
+ idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
+ IDPF_VC_DEALLOC_VECTORS,
+ IDPF_VC_DEALLOC_VECTORS_ERR);
+ break;
+ case VIRTCHNL2_OP_GET_PTYPE_INFO:
+ idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
+ IDPF_VC_GET_PTYPE_INFO,
+ IDPF_VC_GET_PTYPE_INFO_ERR);
+ break;
+ case VIRTCHNL2_OP_LOOPBACK:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_LOOPBACK_STATE,
+ IDPF_VC_LOOPBACK_STATE_ERR);
+ break;
+ case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
+ /* This message can only be sent asynchronously. As
+ * such we'll have lost the context in which it was
+ * called and thus can only really report if it looks
+ * like an error occurred. Don't bother setting ERR bit
+ * or waking chnl_wq since no work queue will be waiting
+ * to read the message.
+ */
+ if (ctlq_msg.cookie.mbx.chnl_retval) {
+ dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n",
+ ctlq_msg.cookie.mbx.chnl_retval);
+ }
+ break;
+ case VIRTCHNL2_OP_ADD_MAC_ADDR:
+ vport_config = adapter->vport_config[vport->idx];
+ if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ,
+ vport_config->flags)) {
+ /* Message was sent asynchronously. We don't
+ * normally print errors here, instead
+ * prefer to handle errors in the function
+ * calling wait_for_event. However, if
+ * asynchronous, the context in which the
+ * message was sent is lost. We can't really do
+ * anything about it at this point, but we
+ * should at a minimum indicate that it looks
+ * like something went wrong. Also don't bother
+ * setting ERR bit or waking vchnl_wq since no
+ * one will be waiting to read the async
+ * message.
+ */
+ if (ctlq_msg.cookie.mbx.chnl_retval)
+ dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n",
+ ctlq_msg.cookie.mbx.chnl_retval);
+ break;
+ }
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_ADD_MAC_ADDR,
+ IDPF_VC_ADD_MAC_ADDR_ERR);
+ break;
+ case VIRTCHNL2_OP_DEL_MAC_ADDR:
+ vport_config = adapter->vport_config[vport->idx];
+ if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ,
+ vport_config->flags)) {
+ /* Message was sent asynchronously like the
+ * VIRTCHNL2_OP_ADD_MAC_ADDR
+ */
+ if (ctlq_msg.cookie.mbx.chnl_retval)
+ dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n",
+ ctlq_msg.cookie.mbx.chnl_retval);
+ break;
+ }
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_DEL_MAC_ADDR,
+ IDPF_VC_DEL_MAC_ADDR_ERR);
+ break;
+ case VIRTCHNL2_OP_EVENT:
+ idpf_recv_event_msg(vport, &ctlq_msg);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev,
+ "Unhandled virtchnl response %d\n",
+ ctlq_msg.cookie.mbx.chnl_opcode);
+ break;
+ }
+
+post_buffs:
+ if (ctlq_msg.data_len)
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+ else
+ num_q_msg = 0;
+
+ err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq,
+ &num_q_msg, &dma_mem);
+ /* If post failed clear the only buffer we supplied */
+ if (err && dma_mem)
+ dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
+ dma_mem->va, dma_mem->pa);
+
+ /* Applies only if we are looking for a specific opcode */
+ if (work_done)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * __idpf_wait_for_event - wrapper function for wait on virtchannel response
+ * @adapter: Driver private data structure
+ * @vport: virtual port structure
+ * @state: check on state upon timeout
+ * @err_check: check if this specific error bit is set
+ * @timeout: Max time to wait
+ *
+ * Checks if state is set upon expiry of timeout. Returns 0 on success,
+ * negative on failure.
+ */
+static int __idpf_wait_for_event(struct idpf_adapter *adapter,
+ struct idpf_vport *vport,
+ enum idpf_vport_vc_state state,
+ enum idpf_vport_vc_state err_check,
+ int timeout)
+{
+ int time_to_wait, num_waits;
+ wait_queue_head_t *vchnl_wq;
+ unsigned long *vc_state;
+
+ time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT);
+ num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT);
+
+ if (vport) {
+ vchnl_wq = &vport->vchnl_wq;
+ vc_state = vport->vc_state;
+ } else {
+ vchnl_wq = &adapter->vchnl_wq;
+ vc_state = adapter->vc_state;
+ }
+
+ while (num_waits) {
+ int event;
+
+ /* If we are here and a reset is detected, do not wait but
+ * return. Reset timing is out of the driver's control. So
+ * while we are cleaning up resources as part of a reset, if
+ * the underlying HW mailbox is gone, waiting on mailbox
+ * messages is not meaningful
+ */
+ if (idpf_is_reset_detected(adapter))
+ return 0;
+
+ event = wait_event_timeout(*vchnl_wq,
+ test_and_clear_bit(state, vc_state),
+ msecs_to_jiffies(time_to_wait));
+ if (event) {
+ if (test_and_clear_bit(err_check, vc_state)) {
+ dev_err(&adapter->pdev->dev, "VC response error %s\n",
+ idpf_vport_vc_state_str[err_check]);
+
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+ num_waits--;
+ }
+
+ /* Timeout occurred */
+ dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
+ idpf_vport_vc_state_str[state]);
+
+ return -ETIMEDOUT;
+}
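+
+/* Example of the wait split above, assuming a hypothetical IDPF_MAX_WAIT
+ * of 500 ms: a 2000 ms timeout becomes num_waits = 4 sleeps of
+ * time_to_wait = 500 ms each, so a detected reset can cut the wait short
+ * between sleeps instead of blocking for the full timeout.
+ */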
+
+/**
+ * idpf_min_wait_for_event - wait for virtchannel response
+ * @adapter: Driver private data structure
+ * @vport: virtual port structure
+ * @state: check on state upon timeout
+ * @err_check: check if this specific error bit is set
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
+ struct idpf_vport *vport,
+ enum idpf_vport_vc_state state,
+ enum idpf_vport_vc_state err_check)
+{
+ return __idpf_wait_for_event(adapter, vport, state, err_check,
+ IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
+}
+
+/**
+ * idpf_wait_for_event - wait for virtchannel response
+ * @adapter: Driver private data structure
+ * @vport: virtual port structure
+ * @state: check on state upon timeout after 500ms
+ * @err_check: check if this specific error bit is set
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_wait_for_event(struct idpf_adapter *adapter,
+ struct idpf_vport *vport,
+ enum idpf_vport_vc_state state,
+ enum idpf_vport_vc_state err_check)
+{
+ /* The timeout is increased in the __IDPF_INIT_SW flow to account for
+ * a large number of VF mailbox message responses. When a message is
+ * received on the mailbox, this thread is woken up by idpf_recv_mb_msg
+ * before the timeout expires. Only in the error case, i.e. if no
+ * message is received on the mailbox, do we wait for the complete
+ * timeout, which is unlikely to happen.
+ */
+ return __idpf_wait_for_event(adapter, vport, state, err_check,
+ IDPF_WAIT_FOR_EVENT_TIMEO);
+}
+
+/**
+ * idpf_wait_for_marker_event - wait for software marker response
+ * @vport: virtual port data structure
+ *
+ * Returns 0 on success, negative on failure.
+ **/
+static int idpf_wait_for_marker_event(struct idpf_vport *vport)
+{
+ int event;
+ int i;
+
+ for (i = 0; i < vport->num_txq; i++)
+ set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags);
+
+ event = wait_event_timeout(vport->sw_marker_wq,
+ test_and_clear_bit(IDPF_VPORT_SW_MARKER,
+ vport->flags),
+ msecs_to_jiffies(500));
+
+ for (i = 0; i < vport->num_txq; i++)
+ clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+
+ if (event)
+ return 0;
+
+ dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * idpf_send_ver_msg - send virtchnl version message
+ * @adapter: Driver specific private structure
+ *
+ * Send virtchnl version message. Returns 0 on success, negative on failure.
+ */
+static int idpf_send_ver_msg(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_version_info vvi;
+
+ if (adapter->virt_ver_maj) {
+ vvi.major = cpu_to_le32(adapter->virt_ver_maj);
+ vvi.minor = cpu_to_le32(adapter->virt_ver_min);
+ } else {
+ vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
+ vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
+ }
+
+ return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
+ (u8 *)&vvi);
+}
+
+/**
+ * idpf_recv_ver_msg - Receive virtchnl version message
+ * @adapter: Driver specific private structure
+ *
+ * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
+ * to send version message again, otherwise negative on failure.
+ */
+static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_version_info vvi;
+ u32 major, minor;
+ int err;
+
+ err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
+ sizeof(vvi));
+ if (err)
+ return err;
+
+ major = le32_to_cpu(vvi.major);
+ minor = le32_to_cpu(vvi.minor);
+
+ if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
+ dev_warn(&adapter->pdev->dev,
+ "Virtchnl major version (%d) greater than supported\n",
+ major);
+
+ return -EINVAL;
+ }
+
+ if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
+ minor > IDPF_VIRTCHNL_VERSION_MINOR)
+ dev_warn(&adapter->pdev->dev,
+ "Virtchnl minor version (%d) didn't match\n", minor);
+
+ /* If we have a mismatch, resend version to update receiver on what
+ * version we will use.
+ */
+ if (!adapter->virt_ver_maj &&
+ major != IDPF_VIRTCHNL_VERSION_MAJOR &&
+ minor != IDPF_VIRTCHNL_VERSION_MINOR)
+ err = -EAGAIN;
+
+ adapter->virt_ver_maj = major;
+ adapter->virt_ver_min = minor;
+
+ return err;
+}
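+
+/* Negotiation sketch with hypothetical versions: the driver first sends
+ * IDPF_VIRTCHNL_VERSION_MAJOR/MINOR, say 2.0. If the device answers 1.5,
+ * both fields differ and virt_ver_maj is still 0, so -EAGAIN is returned;
+ * the stored 1.5 is then resent by idpf_send_ver_msg() to confirm the
+ * version both sides will use.
+ */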
+
+/**
+ * idpf_send_get_caps_msg - Send virtchnl get capabilities message
+ * @adapter: Driver specific private structure
+ *
+ * Send virtchnl get capabilities message. Returns 0 on success, negative on
+ * failure.
+ */
+static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_get_capabilities caps = { };
+
+ caps.csum_caps =
+ cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |
+ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP |
+ VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
+ VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
+ VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
+ VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
+ VIRTCHNL2_CAP_RX_CSUM_GENERIC);
+
+ caps.seg_caps =
+ cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP |
+ VIRTCHNL2_CAP_SEG_IPV4_UDP |
+ VIRTCHNL2_CAP_SEG_IPV4_SCTP |
+ VIRTCHNL2_CAP_SEG_IPV6_TCP |
+ VIRTCHNL2_CAP_SEG_IPV6_UDP |
+ VIRTCHNL2_CAP_SEG_IPV6_SCTP |
+ VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
+
+ caps.rss_caps =
+ cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
+ VIRTCHNL2_CAP_RSS_IPV4_UDP |
+ VIRTCHNL2_CAP_RSS_IPV4_SCTP |
+ VIRTCHNL2_CAP_RSS_IPV4_OTHER |
+ VIRTCHNL2_CAP_RSS_IPV6_TCP |
+ VIRTCHNL2_CAP_RSS_IPV6_UDP |
+ VIRTCHNL2_CAP_RSS_IPV6_SCTP |
+ VIRTCHNL2_CAP_RSS_IPV6_OTHER);
+
+ caps.hsplit_caps =
+ cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);
+
+ caps.rsc_caps =
+ cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP |
+ VIRTCHNL2_CAP_RSC_IPV6_TCP);
+
+ caps.other_caps =
+ cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
+ VIRTCHNL2_CAP_MACFILTER |
+ VIRTCHNL2_CAP_SPLITQ_QSCHED |
+ VIRTCHNL2_CAP_PROMISC |
+ VIRTCHNL2_CAP_LOOPBACK);
+
+ return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps),
+ (u8 *)&caps);
+}
+
+/**
+ * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message
+ * @adapter: Driver specific private structure
+ *
+ * Receive virtchnl get capabilities message. Returns 0 on success, negative on
+ * failure.
+ */
+static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter)
+{
+ return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps,
+ sizeof(struct virtchnl2_get_capabilities));
+}
+
+/**
+ * idpf_vport_alloc_max_qs - Allocate max queues for a vport
+ * @adapter: Driver specific private structure
+ * @max_q: vport max queue structure
+ */
+int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q)
+{
+ struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
+ struct virtchnl2_get_capabilities *caps = &adapter->caps;
+ u16 default_vports = idpf_get_default_vports(adapter);
+ int max_rx_q, max_tx_q;
+
+ mutex_lock(&adapter->queue_lock);
+
+ max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
+ max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
+ if (adapter->num_alloc_vports < default_vports) {
+ max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
+ max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
+ } else {
+ max_q->max_rxq = IDPF_MIN_Q;
+ max_q->max_txq = IDPF_MIN_Q;
+ }
+ max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ max_q->max_complq = max_q->max_txq;
+
+ if (avail_queues->avail_rxq < max_q->max_rxq ||
+ avail_queues->avail_txq < max_q->max_txq ||
+ avail_queues->avail_bufq < max_q->max_bufq ||
+ avail_queues->avail_complq < max_q->max_complq) {
+ mutex_unlock(&adapter->queue_lock);
+
+ return -EINVAL;
+ }
+
+ avail_queues->avail_rxq -= max_q->max_rxq;
+ avail_queues->avail_txq -= max_q->max_txq;
+ avail_queues->avail_bufq -= max_q->max_bufq;
+ avail_queues->avail_complq -= max_q->max_complq;
+
+ mutex_unlock(&adapter->queue_lock);
+
+ return 0;
+}
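+
+/* Example with hypothetical caps: max_rx_q = 16, max_tx_q = 16 and two
+ * default vports let each of the first two vports claim up to 8 Rx and
+ * 8 Tx queues (further capped at IDPF_MAX_Q); any vport beyond the
+ * default count is limited to IDPF_MIN_Q of each, with buffer and
+ * completion queues derived from those numbers as above.
+ */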
+
+/**
+ * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
+ * @adapter: Driver specific private structure
+ * @max_q: vport max queue structure
+ */
+void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q)
+{
+ struct idpf_avail_queue_info *avail_queues;
+
+ mutex_lock(&adapter->queue_lock);
+ avail_queues = &adapter->avail_queues;
+
+ avail_queues->avail_rxq += max_q->max_rxq;
+ avail_queues->avail_txq += max_q->max_txq;
+ avail_queues->avail_bufq += max_q->max_bufq;
+ avail_queues->avail_complq += max_q->max_complq;
+
+ mutex_unlock(&adapter->queue_lock);
+}
+
+/**
+ * idpf_init_avail_queues - Initialize available queues on the device
+ * @adapter: Driver specific private structure
+ */
+static void idpf_init_avail_queues(struct idpf_adapter *adapter)
+{
+ struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
+ struct virtchnl2_get_capabilities *caps = &adapter->caps;
+
+ avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
+ avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
+ avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
+ avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
+}
+
+/**
+ * idpf_get_reg_intr_vecs - Get vector queue register offset
+ * @vport: virtual port structure
+ * @reg_vals: Register offsets to store in
+ *
+ * Returns number of registers that got populated
+ */
+int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+ struct idpf_vec_regs *reg_vals)
+{
+ struct virtchnl2_vector_chunks *chunks;
+ struct idpf_vec_regs reg_val;
+ u16 num_vchunks, num_vec;
+ int num_regs = 0, i, j;
+
+ chunks = &vport->adapter->req_vec_chunks->vchunks;
+ num_vchunks = le16_to_cpu(chunks->num_vchunks);
+
+ for (j = 0; j < num_vchunks; j++) {
+ struct virtchnl2_vector_chunk *chunk;
+ u32 dynctl_reg_spacing;
+ u32 itrn_reg_spacing;
+
+ chunk = &chunks->vchunks[j];
+ num_vec = le16_to_cpu(chunk->num_vectors);
+ reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
+ reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
+ reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);
+
+ dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
+ itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);
+
+ for (i = 0; i < num_vec; i++) {
+ reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
+ reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
+ reg_vals[num_regs].itrn_index_spacing =
+ reg_val.itrn_index_spacing;
+
+ reg_val.dyn_ctl_reg += dynctl_reg_spacing;
+ reg_val.itrn_reg += itrn_reg_spacing;
+ num_regs++;
+ }
+ }
+
+ return num_regs;
+}
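+
+/* Example of the stride walk above with hypothetical chunk values:
+ * dynctl_reg_start = 0x1000, dynctl_reg_spacing = 4 and num_vectors = 3
+ * populate dyn_ctl_reg offsets 0x1000, 0x1004 and 0x1008; itrn_reg is
+ * advanced the same way by itrn_reg_spacing.
+ */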
+
+/**
+ * idpf_vport_get_q_reg - Get the queue registers for the vport
+ * @reg_vals: register values needing to be set
+ * @num_regs: amount we expect to fill
+ * @q_type: queue model
+ * @chunks: queue regs received over mailbox
+ *
+ * This function parses the queue register offsets from the queue register
+ * chunk information for a specific queue type and stores them into the array
+ * passed as an argument. It returns the actual number of queue registers that
+ * are filled.
+ */
+static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
+ struct virtchnl2_queue_reg_chunks *chunks)
+{
+ u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ int reg_filled = 0, i;
+ u32 reg_val;
+
+ while (num_chunks--) {
+ struct virtchnl2_queue_reg_chunk *chunk;
+ u16 num_q;
+
+ chunk = &chunks->chunks[num_chunks];
+ if (le32_to_cpu(chunk->type) != q_type)
+ continue;
+
+ num_q = le32_to_cpu(chunk->num_queues);
+ reg_val = le64_to_cpu(chunk->qtail_reg_start);
+ for (i = 0; i < num_q && reg_filled < num_regs; i++) {
+ reg_vals[reg_filled++] = reg_val;
+ reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
+ }
+ }
+
+ return reg_filled;
+}
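+
+/* Example with hypothetical chunk values: a chunk of num_queues = 4 with
+ * qtail_reg_start = 0x2000 and qtail_reg_spacing = 0x10 fills reg_vals
+ * with 0x2000, 0x2010, 0x2020 and 0x2030. Chunks are walked back to
+ * front, and filling stops once num_regs entries are written.
+ */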
+
+/**
+ * __idpf_queue_reg_init - initialize queue registers
+ * @vport: virtual port structure
+ * @reg_vals: registers we are initializing
+ * @num_regs: how many registers there are in total
+ * @q_type: queue model
+ *
+ * Return number of queues that are initialized
+ */
+static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
+ int num_regs, u32 q_type)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_queue *q;
+ int i, j, k = 0;
+
+ switch (q_type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
+ tx_qgrp->txqs[j]->tail =
+ idpf_get_reg_addr(adapter, reg_vals[k]);
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u16 num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
+ q = rx_qgrp->singleq.rxqs[j];
+ q->tail = idpf_get_reg_addr(adapter,
+ reg_vals[k]);
+ }
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u8 num_bufqs = vport->num_bufqs_per_qgrp;
+
+ for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ q->tail = idpf_get_reg_addr(adapter,
+ reg_vals[k]);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return k;
+}
+
+/**
+ * idpf_queue_reg_init - initialize queue registers
+ * @vport: virtual port structure
+ *
+ * Return 0 on success, negative on failure
+ */
+int idpf_queue_reg_init(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport *vport_params;
+ struct virtchnl2_queue_reg_chunks *chunks;
+ struct idpf_vport_config *vport_config;
+ u16 vport_idx = vport->idx;
+ int num_regs, ret = 0;
+ u32 *reg_vals;
+
+ /* We should never deal with more than 256 queues of the same type */
+ reg_vals = kcalloc(IDPF_LARGE_MAX_Q, sizeof(u32), GFP_KERNEL);
+ if (!reg_vals)
+ return -ENOMEM;
+
+ vport_config = vport->adapter->vport_config[vport_idx];
+ if (vport_config->req_qs_chunks) {
+ struct virtchnl2_add_queues *vc_aq =
+ (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
+ chunks = &vc_aq->chunks;
+ } else {
+ vport_params = vport->adapter->vport_params_recvd[vport_idx];
+ chunks = &vport_params->chunks;
+ }
+
+ /* Initialize Tx queue tail register address */
+ num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
+ VIRTCHNL2_QUEUE_TYPE_TX,
+ chunks);
+ if (num_regs < vport->num_txq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ VIRTCHNL2_QUEUE_TYPE_TX);
+ if (num_regs < vport->num_txq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ /* Initialize Rx/buffer queue tail register address based on Rx queue
+ * model
+ */
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
+ num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
+ VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
+ chunks);
+ if (num_regs < vport->num_bufq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+ if (num_regs < vport->num_bufq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+ } else {
+ num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
+ VIRTCHNL2_QUEUE_TYPE_RX,
+ chunks);
+ if (num_regs < vport->num_rxq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ VIRTCHNL2_QUEUE_TYPE_RX);
+ if (num_regs < vport->num_rxq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+ }
+
+free_reg_vals:
+ kfree(reg_vals);
+
+ return ret;
+}
+
+/**
+ * idpf_send_create_vport_msg - Send virtchnl create vport message
+ * @adapter: Driver specific private structure
+ * @max_q: vport max queue info
+ *
+ * Send virtchnl create vport message
+ *
+ * Returns 0 on success, negative on failure
+ */
+int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q)
+{
+ struct virtchnl2_create_vport *vport_msg;
+ u16 idx = adapter->next_vport;
+ int err, buf_size;
+
+ buf_size = sizeof(struct virtchnl2_create_vport);
+ if (!adapter->vport_params_reqd[idx]) {
+ adapter->vport_params_reqd[idx] = kzalloc(buf_size,
+ GFP_KERNEL);
+ if (!adapter->vport_params_reqd[idx])
+ return -ENOMEM;
+ }
+
+ vport_msg = adapter->vport_params_reqd[idx];
+ vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+ vport_msg->vport_index = cpu_to_le16(idx);
+
+ if (adapter->req_tx_splitq)
+ vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
+ else
+ vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+
+ if (adapter->req_rx_splitq)
+ vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
+ else
+ vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+
+ err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Enough queues are not available");
+
+ return err;
+ }
+
+ mutex_lock(&adapter->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size,
+ (u8 *)vport_msg);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT,
+ IDPF_VC_CREATE_VPORT_ERR);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to receive create vport message");
+
+ goto rel_lock;
+ }
+
+ if (!adapter->vport_params_recvd[idx]) {
+ adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
+ GFP_KERNEL);
+ if (!adapter->vport_params_recvd[idx]) {
+ err = -ENOMEM;
+ goto rel_lock;
+ }
+ }
+
+ vport_msg = adapter->vport_params_recvd[idx];
+ memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+
+rel_lock:
+ mutex_unlock(&adapter->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_check_supported_desc_ids - Verify we have required descriptor support
+ * @vport: virtual port structure
+ *
+ * Return 0 on success, error on failure
+ */
+int idpf_check_supported_desc_ids(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_create_vport *vport_msg;
+ u64 rx_desc_ids, tx_desc_ids;
+
+ vport_msg = adapter->vport_params_recvd[vport->idx];
+
+ rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
+ tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
+ dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
+ vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
+ }
+ } else {
+ if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
+ vport->base_rxd = true;
+ }
+
+ if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ return 0;
+
+ if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
+ dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
+ vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
+ * @vport: virtual port data structure
+ *
+ * Send virtchnl destroy vport message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_vport v_id;
+ int err;
+
+ v_id.vport_id = cpu_to_le32(vport->vport_id);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
+ sizeof(v_id), (u8 *)&v_id);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT,
+ IDPF_VC_DESTROY_VPORT_ERR);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_enable_vport_msg - Send virtchnl enable vport message
+ * @vport: virtual port data structure
+ *
+ * Send enable vport virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_enable_vport_msg(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_vport v_id;
+ int err;
+
+ v_id.vport_id = cpu_to_le32(vport->vport_id);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
+ sizeof(v_id), (u8 *)&v_id);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
+ IDPF_VC_ENA_VPORT_ERR);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_disable_vport_msg - Send virtchnl disable vport message
+ * @vport: virtual port data structure
+ *
+ * Send disable vport virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_disable_vport_msg(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_vport v_id;
+ int err;
+
+ v_id.vport_id = cpu_to_le32(vport->vport_id);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT,
+ sizeof(v_id), (u8 *)&v_id);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT,
+ IDPF_VC_DIS_VPORT_ERR);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
+ * @vport: virtual port data structure
+ *
+ * Send config tx queues virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+{
+ struct virtchnl2_config_tx_queues *ctq;
+ u32 config_sz, chunk_sz, buf_sz;
+ int totqs, num_msgs, num_chunks;
+ struct virtchnl2_txq_info *qi;
+ int err = 0, i, k = 0;
+
+ totqs = vport->num_txq + vport->num_complq;
+ qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
+ if (!qi)
+ return -ENOMEM;
+
+ /* Populate the queue info buffer with all queue context info */
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ int j;
+
+ for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
+ qi[k].queue_id =
+ cpu_to_le32(tx_qgrp->txqs[j]->q_id);
+ qi[k].model =
+ cpu_to_le16(vport->txq_model);
+ qi[k].type =
+ cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ qi[k].ring_len =
+ cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
+ qi[k].dma_ring_addr =
+ cpu_to_le64(tx_qgrp->txqs[j]->dma);
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ struct idpf_queue *q = tx_qgrp->txqs[j];
+
+ qi[k].tx_compl_queue_id =
+ cpu_to_le16(tx_qgrp->complq->q_id);
+ qi[k].relative_queue_id = cpu_to_le16(j);
+
+ if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags))
+ qi[k].sched_mode =
+ cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
+ else
+ qi[k].sched_mode =
+ cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
+ } else {
+ qi[k].sched_mode =
+ cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
+ }
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ continue;
+
+ qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
+ qi[k].model = cpu_to_le16(vport->txq_model);
+ qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
+ qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
+
+ k++;
+ }
+
+ /* Make sure accounting agrees */
+ if (k != totqs) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* Chunk up the queue contexts into multiple messages to avoid
+ * sending a control queue message buffer that is too large
+ */
+ config_sz = sizeof(struct virtchnl2_config_tx_queues);
+ chunk_sz = sizeof(struct virtchnl2_txq_info);
+
+ num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
+ totqs);
+ num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+
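+ /* Example with hypothetical sizes: if IDPF_NUM_CHUNKS_PER_MSG()
+ * allows 5 qinfo chunks per mailbox buffer and totqs = 12, then
+ * num_msgs = 3 and the loop below sends chunks of 5, 5 and 2,
+ * shrinking buf_sz for the final partial message.
+ */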
+ buf_sz = struct_size(ctq, qinfo, num_chunks);
+ ctq = kzalloc(buf_sz, GFP_KERNEL);
+ if (!ctq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ for (i = 0, k = 0; i < num_msgs; i++) {
+ memset(ctq, 0, buf_sz);
+ ctq->vport_id = cpu_to_le32(vport->vport_id);
+ ctq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
+
+ err = idpf_send_mb_msg(vport->adapter,
+ VIRTCHNL2_OP_CONFIG_TX_QUEUES,
+ buf_sz, (u8 *)ctq);
+ if (err)
+ goto mbx_error;
+
+ err = idpf_wait_for_event(vport->adapter, vport,
+ IDPF_VC_CONFIG_TXQ,
+ IDPF_VC_CONFIG_TXQ_ERR);
+ if (err)
+ goto mbx_error;
+
+ k += num_chunks;
+ totqs -= num_chunks;
+ num_chunks = min(num_chunks, totqs);
+ /* Recalculate buffer size */
+ buf_sz = struct_size(ctq, qinfo, num_chunks);
+ }
+
+mbx_error:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(ctq);
+error:
+ kfree(qi);
+
+ return err;
+}
+
+/**
+ * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
+ * @vport: virtual port data structure
+ *
+ * Send config rx queues virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+{
+ struct virtchnl2_config_rx_queues *crq;
+ u32 config_sz, chunk_sz, buf_sz;
+ int totqs, num_msgs, num_chunks;
+ struct virtchnl2_rxq_info *qi;
+ int err = 0, i, k = 0;
+
+ totqs = vport->num_rxq + vport->num_bufq;
+ qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
+ if (!qi)
+ return -ENOMEM;
+
+ /* Populate the queue info buffer with all queue context info */
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u16 num_rxq;
+ int j;
+
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ goto setup_rxqs;
+
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
+ struct idpf_queue *bufq =
+ &rx_qgrp->splitq.bufq_sets[j].bufq;
+
+ qi[k].queue_id = cpu_to_le32(bufq->q_id);
+ qi[k].model = cpu_to_le16(vport->rxq_model);
+ qi[k].type = cpu_to_le32(bufq->q_type);
+ qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
+ qi[k].ring_len = cpu_to_le16(bufq->desc_count);
+ qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
+ qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
+ qi[k].buffer_notif_stride = bufq->rx_buf_stride;
+ qi[k].rx_buffer_low_watermark =
+ cpu_to_le16(bufq->rx_buffer_low_watermark);
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+ }
+
+setup_rxqs:
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++, k++) {
+ struct idpf_queue *rxq;
+
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ rxq = rx_qgrp->singleq.rxqs[j];
+ goto common_qi_fields;
+ }
+ rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ qi[k].rx_bufq1_id =
+ cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id);
+ if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
+ qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
+ qi[k].rx_bufq2_id =
+ cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id);
+ }
+ qi[k].rx_buffer_low_watermark =
+ cpu_to_le16(rxq->rx_buffer_low_watermark);
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+
+common_qi_fields:
+ if (rxq->rx_hsplit_en) {
+ qi[k].qflags |=
+ cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
+ qi[k].hdr_buffer_size =
+ cpu_to_le16(rxq->rx_hbuf_size);
+ }
+ qi[k].queue_id = cpu_to_le32(rxq->q_id);
+ qi[k].model = cpu_to_le16(vport->rxq_model);
+ qi[k].type = cpu_to_le32(rxq->q_type);
+ qi[k].ring_len = cpu_to_le16(rxq->desc_count);
+ qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
+ qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
+ qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
+ qi[k].qflags |=
+ cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
+ qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
+ }
+ }
+
+ /* Make sure accounting agrees */
+ if (k != totqs) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* Chunk up the queue contexts into multiple messages to avoid
+ * sending a control queue message buffer that is too large
+ */
+ config_sz = sizeof(struct virtchnl2_config_rx_queues);
+ chunk_sz = sizeof(struct virtchnl2_rxq_info);
+
+ num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
+ totqs);
+ num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+
+ buf_sz = struct_size(crq, qinfo, num_chunks);
+ crq = kzalloc(buf_sz, GFP_KERNEL);
+ if (!crq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ for (i = 0, k = 0; i < num_msgs; i++) {
+ memset(crq, 0, buf_sz);
+ crq->vport_id = cpu_to_le32(vport->vport_id);
+ crq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
+
+ err = idpf_send_mb_msg(vport->adapter,
+ VIRTCHNL2_OP_CONFIG_RX_QUEUES,
+ buf_sz, (u8 *)crq);
+ if (err)
+ goto mbx_error;
+
+ err = idpf_wait_for_event(vport->adapter, vport,
+ IDPF_VC_CONFIG_RXQ,
+ IDPF_VC_CONFIG_RXQ_ERR);
+ if (err)
+ goto mbx_error;
+
+ k += num_chunks;
+ totqs -= num_chunks;
+ num_chunks = min(num_chunks, totqs);
+ /* Recalculate buffer size */
+ buf_sz = struct_size(crq, qinfo, num_chunks);
+ }
+
+mbx_error:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(crq);
+error:
+ kfree(qi);
+
+ return err;
+}
+
+/**
+ * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
+ * queues message
+ * @vport: virtual port data structure
+ * @vc_op: virtchnl op code to send
+ *
+ * Send enable or disable queues virtchnl message. Returns 0 on success,
+ * negative on failure.
+ */
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
+{
+ u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_del_ena_dis_queues *eq;
+ struct virtchnl2_queue_chunks *qcs;
+ struct virtchnl2_queue_chunk *qc;
+ u32 config_sz, chunk_sz, buf_sz;
+ int i, j, k = 0, err = 0;
+
+ /* validate virtchnl op */
+ switch (vc_op) {
+ case VIRTCHNL2_OP_ENABLE_QUEUES:
+ case VIRTCHNL2_OP_DISABLE_QUEUES:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ num_txq = vport->num_txq + vport->num_complq;
+ num_rxq = vport->num_rxq + vport->num_bufq;
+ num_q = num_txq + num_rxq;
+ buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
+ qc = kzalloc(buf_sz, GFP_KERNEL);
+ if (!qc)
+ return -ENOMEM;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
+ qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
+ qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ }
+ }
+ if (vport->num_txq != k) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ goto setup_rx;
+
+ for (i = 0; i < vport->num_txq_grp; i++, k++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
+ qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ }
+ if (vport->num_complq != (k - vport->num_txq)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+setup_rx:
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++, k++) {
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
+ qc[k].start_queue_id =
+ cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
+ qc[k].type =
+ cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type);
+ } else {
+ qc[k].start_queue_id =
+ cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
+ qc[k].type =
+ cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type);
+ }
+ qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ }
+ }
+ if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ goto send_msg;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
+ struct idpf_queue *q;
+
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ qc[k].type = cpu_to_le32(q->q_type);
+ qc[k].start_queue_id = cpu_to_le32(q->q_id);
+ qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ }
+ }
+ if (vport->num_bufq != k - (vport->num_txq +
+ vport->num_complq +
+ vport->num_rxq)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+send_msg:
+ /* Chunk up the queue info into multiple messages */
+ config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
+ chunk_sz = sizeof(struct virtchnl2_queue_chunk);
+
+ num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
+ num_q);
+ num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+
+ buf_sz = struct_size(eq, chunks.chunks, num_chunks);
+ eq = kzalloc(buf_sz, GFP_KERNEL);
+ if (!eq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ for (i = 0, k = 0; i < num_msgs; i++) {
+ memset(eq, 0, buf_sz);
+ eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->chunks.num_chunks = cpu_to_le16(num_chunks);
+ qcs = &eq->chunks;
+ memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
+
+ err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq);
+ if (err)
+ goto mbx_error;
+
+ if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
+ err = idpf_wait_for_event(adapter, vport,
+ IDPF_VC_ENA_QUEUES,
+ IDPF_VC_ENA_QUEUES_ERR);
+ else
+ err = idpf_min_wait_for_event(adapter, vport,
+ IDPF_VC_DIS_QUEUES,
+ IDPF_VC_DIS_QUEUES_ERR);
+ if (err)
+ goto mbx_error;
+
+ k += num_chunks;
+ num_q -= num_chunks;
+ num_chunks = min(num_chunks, num_q);
+ /* Recalculate buffer size */
+ buf_sz = struct_size(eq, chunks.chunks, num_chunks);
+ }
+
+mbx_error:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(eq);
+error:
+ kfree(qc);
+
+ return err;
+}
+
+/**
+ * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
+ * vector message
+ * @vport: virtual port data structure
+ * @map: true for map and false for unmap
+ *
+ * Send map or unmap queue vector virtchnl message. Returns 0 on success,
+ * negative on failure.
+ */
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector_maps *vqvm;
+ struct virtchnl2_queue_vector *vqv;
+ u32 config_sz, chunk_sz, buf_sz;
+ u32 num_msgs, num_chunks, num_q;
+ int i, j, k = 0, err = 0;
+
+ num_q = vport->num_txq + vport->num_rxq;
+
+ buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
+ vqv = kzalloc(buf_sz, GFP_KERNEL);
+ if (!vqv)
+ return -ENOMEM;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
+ vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
+
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ vqv[k].vector_id =
+ cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
+ vqv[k].itr_idx =
+ cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
+ } else {
+ vqv[k].vector_id =
+ cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
+ vqv[k].itr_idx =
+ cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
+ }
+ }
+ }
+
+ if (vport->num_txq != k) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u16 num_rxq;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++, k++) {
+ struct idpf_queue *rxq;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ rxq = rx_qgrp->singleq.rxqs[j];
+
+ vqv[k].queue_type = cpu_to_le32(rxq->q_type);
+ vqv[k].queue_id = cpu_to_le32(rxq->q_id);
+ vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
+ vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
+ }
+ }
+
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ if (vport->num_rxq != k - vport->num_complq) {
+ err = -EINVAL;
+ goto error;
+ }
+ } else {
+ if (vport->num_rxq != k - vport->num_txq) {
+ err = -EINVAL;
+ goto error;
+ }
+ }
+
+ /* Chunk up the vector info into multiple messages */
+ config_sz = sizeof(struct virtchnl2_queue_vector_maps);
+ chunk_sz = sizeof(struct virtchnl2_queue_vector);
+
+ num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
+ num_q);
+ num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+
+ buf_sz = struct_size(vqvm, qv_maps, num_chunks);
+ vqvm = kzalloc(buf_sz, GFP_KERNEL);
+ if (!vqvm) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ for (i = 0, k = 0; i < num_msgs; i++) {
+ memset(vqvm, 0, buf_sz);
+ vqvm->vport_id = cpu_to_le32(vport->vport_id);
+ vqvm->num_qv_maps = cpu_to_le16(num_chunks);
+ memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
+
+ if (map) {
+ err = idpf_send_mb_msg(adapter,
+ VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
+ buf_sz, (u8 *)vqvm);
+ if (!err)
+ err = idpf_wait_for_event(adapter, vport,
+ IDPF_VC_MAP_IRQ,
+ IDPF_VC_MAP_IRQ_ERR);
+ } else {
+ err = idpf_send_mb_msg(adapter,
+ VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
+ buf_sz, (u8 *)vqvm);
+ if (!err)
+ err =
+ idpf_min_wait_for_event(adapter, vport,
+ IDPF_VC_UNMAP_IRQ,
+ IDPF_VC_UNMAP_IRQ_ERR);
+ }
+ if (err)
+ goto mbx_error;
+
+ k += num_chunks;
+ num_q -= num_chunks;
+ num_chunks = min(num_chunks, num_q);
+ /* Recalculate buffer size */
+ buf_sz = struct_size(vqvm, qv_maps, num_chunks);
+ }
+
+mbx_error:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(vqvm);
+error:
+ kfree(vqv);
+
+ return err;
+}
+
+/**
+ * idpf_send_enable_queues_msg - send enable queues virtchnl message
+ * @vport: Virtual port private data structure
+ *
+ * Will send enable queues virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_enable_queues_msg(struct idpf_vport *vport)
+{
+ return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
+}
+
+/**
+ * idpf_send_disable_queues_msg - send disable queues virtchnl message
+ * @vport: Virtual port private data structure
+ *
+ * Will send disable queues virtchnl message. Returns 0 on success, negative
+ * on failure.
+ */
+int idpf_send_disable_queues_msg(struct idpf_vport *vport)
+{
+ int err, i;
+
+ err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
+ if (err)
+ return err;
+
+ /* switch to poll mode as interrupts will be disabled after disable
+ * queues virtchnl message is sent
+ */
+ for (i = 0; i < vport->num_txq; i++)
+ set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+
+ /* schedule the napi to receive all the marker packets */
+ for (i = 0; i < vport->num_q_vectors; i++)
+ napi_schedule(&vport->q_vectors[i].napi);
+
+ return idpf_wait_for_marker_event(vport);
+}
+
+/**
+ * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the
+ * destination structure
+ * @dchunks: Destination chunks to store data to
+ * @schunks: Source chunks to copy data from
+ * @num_chunks: number of chunks to copy
+ */
+static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
+ struct virtchnl2_queue_reg_chunk *schunks,
+ u16 num_chunks)
+{
+ u16 i;
+
+ for (i = 0; i < num_chunks; i++) {
+ dchunks[i].type = schunks[i].type;
+ dchunks[i].start_queue_id = schunks[i].start_queue_id;
+ dchunks[i].num_queues = schunks[i].num_queues;
+ }
+}
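+
+/* Note: the queue *register* chunk variant also carries register addressing
+ * details beyond the id/type/count triplet copied above (see the structure
+ * definitions in virtchnl2.h); the del/ena/dis queue messages only need the
+ * latter, hence this narrowing copy.
+ */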
+
+/**
+ * idpf_send_delete_queues_msg - send delete queues virtchnl message
+ * @vport: Virtual port private data structure
+ *
+ * Will send delete queues virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_delete_queues_msg(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_create_vport *vport_params;
+ struct virtchnl2_queue_reg_chunks *chunks;
+ struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vport_config *vport_config;
+ u16 vport_idx = vport->idx;
+ int buf_size, err;
+ u16 num_chunks;
+
+ vport_config = adapter->vport_config[vport_idx];
+ if (vport_config->req_qs_chunks) {
+ struct virtchnl2_add_queues *vc_aq =
+ (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
+ chunks = &vc_aq->chunks;
+ } else {
+ vport_params = adapter->vport_params_recvd[vport_idx];
+ chunks = &vport_params->chunks;
+ }
+
+ num_chunks = le16_to_cpu(chunks->num_chunks);
+ buf_size = struct_size(eq, chunks.chunks, num_chunks);
+
+ eq = kzalloc(buf_size, GFP_KERNEL);
+ if (!eq)
+ return -ENOMEM;
+
+ eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->chunks.num_chunks = cpu_to_le16(num_chunks);
+
+ idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
+ num_chunks);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
+ buf_size, (u8 *)eq);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
+ IDPF_VC_DEL_QUEUES_ERR);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(eq);
+
+ return err;
+}
+
+/**
+ * idpf_send_config_queues_msg - Send config queues virtchnl message
+ * @vport: Virtual port private data structure
+ *
+ * Will send config queues virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_config_queues_msg(struct idpf_vport *vport)
+{
+ int err;
+
+ err = idpf_send_config_tx_queues_msg(vport);
+ if (err)
+ return err;
+
+ return idpf_send_config_rx_queues_msg(vport);
+}
+
+/**
+ * idpf_send_add_queues_msg - Send virtchnl add queues message
+ * @vport: Virtual port private data structure
+ * @num_tx_q: number of transmit queues
+ * @num_complq: number of transmit completion queues
+ * @num_rx_q: number of receive queues
+ * @num_rx_bufq: number of receive buffer queues
+ *
+ * Returns 0 on success, negative on failure. vport _MUST_ be const here as
+ * we should not change any fields within vport itself in this function.
+ */
+int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
+ u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_add_queues aq = { };
+ struct virtchnl2_add_queues *vc_msg;
+ u16 vport_idx = vport->idx;
+ int size, err;
+
+ vport_config = adapter->vport_config[vport_idx];
+
+ aq.vport_id = cpu_to_le32(vport->vport_id);
+ aq.num_tx_q = cpu_to_le16(num_tx_q);
+ aq.num_tx_complq = cpu_to_le16(num_complq);
+ aq.num_rx_q = cpu_to_le16(num_rx_q);
+ aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
+
+ mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
+ sizeof(struct virtchnl2_add_queues), (u8 *)&aq);
+ if (err)
+ goto rel_lock;
+
+ /* We want vport to be const to prevent incidental code changes making
+ * changes to the vport config. We're making a special exception here
+ * to discard const to use the virtchnl.
+ */
+ err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
+ IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
+ if (err)
+ goto rel_lock;
+
+ kfree(vport_config->req_qs_chunks);
+ vport_config->req_qs_chunks = NULL;
+
+ vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
+ /* compare vc_msg num queues with vport num queues */
+ if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
+ le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
+ le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
+ le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
+ err = -EINVAL;
+ goto rel_lock;
+ }
+
+ size = struct_size(vc_msg, chunks.chunks,
+ le16_to_cpu(vc_msg->chunks.num_chunks));
+ vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
+ if (!vport_config->req_qs_chunks) {
+ err = -ENOMEM;
+ goto rel_lock;
+ }
+
+rel_lock:
+ mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
+ * @adapter: Driver specific private structure
+ * @num_vectors: number of vectors to be allocated
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
+{
+ struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec;
+ struct virtchnl2_alloc_vectors ac = { };
+ u16 num_vchunks;
+ int size, err;
+
+ ac.num_vectors = cpu_to_le16(num_vectors);
+
+ mutex_lock(&adapter->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS,
+ sizeof(ac), (u8 *)&ac);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS,
+ IDPF_VC_ALLOC_VECTORS_ERR);
+ if (err)
+ goto rel_lock;
+
+ rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg;
+ num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
+
+ size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
+ if (size > sizeof(adapter->vc_msg)) {
+ err = -EINVAL;
+ goto rel_lock;
+ }
+
+ kfree(adapter->req_vec_chunks);
+ adapter->req_vec_chunks = NULL;
+ adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL);
+ if (!adapter->req_vec_chunks) {
+ err = -ENOMEM;
+ goto rel_lock;
+ }
+
+ alloc_vec = adapter->req_vec_chunks;
+ if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) {
+ kfree(adapter->req_vec_chunks);
+ adapter->req_vec_chunks = NULL;
+ err = -EINVAL;
+ }
+
+rel_lock:
+ mutex_unlock(&adapter->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
+ * @adapter: Driver specific private structure
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
+ struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
+ int buf_size, err;
+
+ buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
+
+ mutex_lock(&adapter->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size,
+ (u8 *)vcs);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS,
+ IDPF_VC_DEALLOC_VECTORS_ERR);
+ if (err)
+ goto rel_lock;
+
+ kfree(adapter->req_vec_chunks);
+ adapter->req_vec_chunks = NULL;
+
+rel_lock:
+ mutex_unlock(&adapter->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_get_max_vfs - Get max number of VFs supported
+ * @adapter: Driver specific private structure
+ *
+ * Returns max number of VFs
+ */
+static int idpf_get_max_vfs(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.max_sriov_vfs);
+}
+
+/**
+ * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
+ * @adapter: Driver specific private structure
+ * @num_vfs: number of virtual functions to be created
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
+{
+ struct virtchnl2_sriov_vfs_info svi = { };
+ int err;
+
+ svi.num_vfs = cpu_to_le16(num_vfs);
+
+ mutex_lock(&adapter->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS,
+ sizeof(svi), (u8 *)&svi);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS,
+ IDPF_VC_SET_SRIOV_VFS_ERR);
+
+rel_lock:
+ mutex_unlock(&adapter->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_get_stats_msg - Send virtchnl get statistics message
+ * @vport: vport to get stats for
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_get_stats_msg(struct idpf_vport *vport)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct rtnl_link_stats64 *netstats = &np->netstats;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_vport_stats stats_msg = { };
+ struct virtchnl2_vport_stats *stats;
+ int err;
+
+ /* Don't send get_stats message if the link is down */
+ if (np->state <= __IDPF_VPORT_DOWN)
+ return 0;
+
+ stats_msg.vport_id = cpu_to_le32(vport->vport_id);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS,
+ sizeof(struct virtchnl2_vport_stats),
+ (u8 *)&stats_msg);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS,
+ IDPF_VC_GET_STATS_ERR);
+ if (err)
+ goto rel_lock;
+
+ stats = (struct virtchnl2_vport_stats *)vport->vc_msg;
+
+ spin_lock_bh(&np->stats_lock);
+
+ netstats->rx_packets = le64_to_cpu(stats->rx_unicast) +
+ le64_to_cpu(stats->rx_multicast) +
+ le64_to_cpu(stats->rx_broadcast);
+ netstats->rx_bytes = le64_to_cpu(stats->rx_bytes);
+ netstats->rx_dropped = le64_to_cpu(stats->rx_discards);
+ netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop);
+ netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length);
+
+ netstats->tx_packets = le64_to_cpu(stats->tx_unicast) +
+ le64_to_cpu(stats->tx_multicast) +
+ le64_to_cpu(stats->tx_broadcast);
+ netstats->tx_bytes = le64_to_cpu(stats->tx_bytes);
+ netstats->tx_errors = le64_to_cpu(stats->tx_errors);
+ netstats->tx_dropped = le64_to_cpu(stats->tx_discards);
+
+ vport->port_stats.vport_stats = *stats;
+
+ spin_unlock_bh(&np->stats_lock);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
+ * @vport: virtual port data structure
+ * @get: flag to get or set the RSS lookup table
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_rss_lut *recv_rl;
+ struct idpf_rss_data *rss_data;
+ struct virtchnl2_rss_lut *rl;
+ int buf_size, lut_buf_size;
+ int i, err;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
+ rl = kzalloc(buf_size, GFP_KERNEL);
+ if (!rl)
+ return -ENOMEM;
+
+ rl->vport_id = cpu_to_le32(vport->vport_id);
+ mutex_lock(&vport->vc_buf_lock);
+
+ if (!get) {
+ rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
+ for (i = 0; i < rss_data->rss_lut_size; i++)
+ rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT,
+ buf_size, (u8 *)rl);
+ if (err)
+ goto free_mem;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT,
+ IDPF_VC_SET_RSS_LUT_ERR);
+
+ goto free_mem;
+ }
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT,
+ buf_size, (u8 *)rl);
+ if (err)
+ goto free_mem;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT,
+ IDPF_VC_GET_RSS_LUT_ERR);
+ if (err)
+ goto free_mem;
+
+ recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg;
+ if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
+ goto do_memcpy;
+
+ rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
+ kfree(rss_data->rss_lut);
+
+ lut_buf_size = rss_data->rss_lut_size * sizeof(u32);
+ rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
+ if (!rss_data->rss_lut) {
+ rss_data->rss_lut_size = 0;
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+do_memcpy:
+ memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size);
+free_mem:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(rl);
+
+ return err;
+}
+
+/**
+ * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
+ * @vport: virtual port data structure
+ * @get: flag to get or set the RSS key
+ *
+ * Returns 0 on success, negative on failure
+ */
+int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_rss_key *recv_rk;
+ struct idpf_rss_data *rss_data;
+ struct virtchnl2_rss_key *rk;
+ int i, buf_size, err;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
+ rk = kzalloc(buf_size, GFP_KERNEL);
+ if (!rk)
+ return -ENOMEM;
+
+ rk->vport_id = cpu_to_le32(vport->vport_id);
+ mutex_lock(&vport->vc_buf_lock);
+
+ if (get) {
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY,
+ buf_size, (u8 *)rk);
+ if (err)
+ goto error;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY,
+ IDPF_VC_GET_RSS_KEY_ERR);
+ if (err)
+ goto error;
+
+ recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg;
+ if (rss_data->rss_key_size !=
+ le16_to_cpu(recv_rk->key_len)) {
+ rss_data->rss_key_size =
+ min_t(u16, NETDEV_RSS_KEY_LEN,
+ le16_to_cpu(recv_rk->key_len));
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = kzalloc(rss_data->rss_key_size,
+ GFP_KERNEL);
+ if (!rss_data->rss_key) {
+ rss_data->rss_key_size = 0;
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+ memcpy(rss_data->rss_key, recv_rk->key_flex,
+ rss_data->rss_key_size);
+ } else {
+ rk->key_len = cpu_to_le16(rss_data->rss_key_size);
+ for (i = 0; i < rss_data->rss_key_size; i++)
+ rk->key_flex[i] = rss_data->rss_key[i];
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY,
+ buf_size, (u8 *)rk);
+ if (err)
+ goto error;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY,
+ IDPF_VC_SET_RSS_KEY_ERR);
+ }
+
+error:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(rk);
+
+ return err;
+}
+
+/**
+ * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
+ * @ptype: ptype lookup table
+ * @pstate: state machine for ptype lookup table
+ * @ipv4: true for an IPv4 header, false for IPv6
+ * @frag: true if the header is a fragment
+ */
+static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
+ struct idpf_ptype_state *pstate,
+ bool ipv4, bool frag)
+{
+ if (!pstate->outer_ip || !pstate->outer_frag) {
+ ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP;
+ pstate->outer_ip = true;
+
+ if (ipv4)
+ ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4;
+ else
+ ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6;
+
+ if (frag) {
+ ptype->outer_frag = IDPF_RX_PTYPE_FRAG;
+ pstate->outer_frag = true;
+ }
+ } else {
+ ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP;
+ pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
+
+ if (ipv4)
+ ptype->tunnel_end_prot =
+ IDPF_RX_PTYPE_TUNNEL_END_IPV4;
+ else
+ ptype->tunnel_end_prot =
+ IDPF_RX_PTYPE_TUNNEL_END_IPV6;
+
+ if (frag)
+ ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG;
+ }
+}
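+
+/* Example, tracing the condition above (illustrative only): for a ptype whose
+ * proto id list is [IPV4_FRAG, IPV6], the first call sets both outer_ip and
+ * outer_frag, so the second call falls through to the else branch and records
+ * an IP-in-IP tunnel with an IPv6 tunnel end.
+ */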
+
+/**
+ * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
+ * @vport: virtual port data structure
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
+{
+ struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
+ struct virtchnl2_get_ptype_info get_ptype_info;
+ int max_ptype, ptypes_recvd = 0, ptype_offset;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_get_ptype_info *ptype_info;
+ u16 next_ptype_id = 0;
+ int err = 0, i, j, k;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ max_ptype = IDPF_RX_MAX_PTYPE;
+ else
+ max_ptype = IDPF_RX_MAX_BASE_PTYPE;
+
+ memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+
+ ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!ptype_info)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->vc_buf_lock);
+
+ while (next_ptype_id < max_ptype) {
+ get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id);
+
+ if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
+ get_ptype_info.num_ptypes =
+ cpu_to_le16(max_ptype - next_ptype_id);
+ else
+ get_ptype_info.num_ptypes =
+ cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+ sizeof(struct virtchnl2_get_ptype_info),
+ (u8 *)&get_ptype_info);
+ if (err)
+ goto vc_buf_unlock;
+
+ err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO,
+ IDPF_VC_GET_PTYPE_INFO_ERR);
+ if (err)
+ goto vc_buf_unlock;
+
+ memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+
+ ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
+ if (ptypes_recvd > max_ptype) {
+ err = -EINVAL;
+ goto vc_buf_unlock;
+ }
+
+ next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) +
+ le16_to_cpu(get_ptype_info.num_ptypes);
+
+ ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
+
+ for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
+ struct idpf_ptype_state pstate = { };
+ struct virtchnl2_ptype *ptype;
+ u16 id;
+
+ ptype = (struct virtchnl2_ptype *)
+ ((u8 *)ptype_info + ptype_offset);
+
+ ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
+ if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) {
+ err = -EINVAL;
+ goto vc_buf_unlock;
+ }
+
+ /* 0xFFFF indicates end of ptypes */
+ if (le16_to_cpu(ptype->ptype_id_10) ==
+ IDPF_INVALID_PTYPE_ID) {
+ err = 0;
+ goto vc_buf_unlock;
+ }
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ k = le16_to_cpu(ptype->ptype_id_10);
+ else
+ k = ptype->ptype_id_8;
+
+ if (ptype->proto_id_count)
+ ptype_lkup[k].known = 1;
+
+ for (j = 0; j < ptype->proto_id_count; j++) {
+ id = le16_to_cpu(ptype->proto_id[j]);
+ switch (id) {
+ case VIRTCHNL2_PROTO_HDR_GRE:
+ if (pstate.tunnel_state ==
+ IDPF_PTYPE_TUNNEL_IP) {
+ ptype_lkup[k].tunnel_type =
+ IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
+ pstate.tunnel_state |=
+ IDPF_PTYPE_TUNNEL_IP_GRENAT;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_MAC:
+ ptype_lkup[k].outer_ip =
+ IDPF_RX_PTYPE_OUTER_L2;
+ if (pstate.tunnel_state ==
+ IDPF_TUN_IP_GRE) {
+ ptype_lkup[k].tunnel_type =
+ IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ pstate.tunnel_state |=
+ IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4:
+ idpf_fill_ptype_lookup(&ptype_lkup[k],
+ &pstate, true,
+ false);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6:
+ idpf_fill_ptype_lookup(&ptype_lkup[k],
+ &pstate, false,
+ false);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+ idpf_fill_ptype_lookup(&ptype_lkup[k],
+ &pstate, true,
+ true);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+ idpf_fill_ptype_lookup(&ptype_lkup[k],
+ &pstate, false,
+ true);
+ break;
+ case VIRTCHNL2_PROTO_HDR_UDP:
+ ptype_lkup[k].inner_prot =
+ IDPF_RX_PTYPE_INNER_PROT_UDP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_TCP:
+ ptype_lkup[k].inner_prot =
+ IDPF_RX_PTYPE_INNER_PROT_TCP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_SCTP:
+ ptype_lkup[k].inner_prot =
+ IDPF_RX_PTYPE_INNER_PROT_SCTP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMP:
+ ptype_lkup[k].inner_prot =
+ IDPF_RX_PTYPE_INNER_PROT_ICMP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_PAY:
+ ptype_lkup[k].payload_layer =
+ IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMPV6:
+ case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+ case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+ case VIRTCHNL2_PROTO_HDR_POST_MAC:
+ case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+ case VIRTCHNL2_PROTO_HDR_SVLAN:
+ case VIRTCHNL2_PROTO_HDR_CVLAN:
+ case VIRTCHNL2_PROTO_HDR_MPLS:
+ case VIRTCHNL2_PROTO_HDR_MMPLS:
+ case VIRTCHNL2_PROTO_HDR_PTP:
+ case VIRTCHNL2_PROTO_HDR_CTRL:
+ case VIRTCHNL2_PROTO_HDR_LLDP:
+ case VIRTCHNL2_PROTO_HDR_ARP:
+ case VIRTCHNL2_PROTO_HDR_ECP:
+ case VIRTCHNL2_PROTO_HDR_EAPOL:
+ case VIRTCHNL2_PROTO_HDR_PPPOD:
+ case VIRTCHNL2_PROTO_HDR_PPPOE:
+ case VIRTCHNL2_PROTO_HDR_IGMP:
+ case VIRTCHNL2_PROTO_HDR_AH:
+ case VIRTCHNL2_PROTO_HDR_ESP:
+ case VIRTCHNL2_PROTO_HDR_IKE:
+ case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+ case VIRTCHNL2_PROTO_HDR_L2TPV3:
+ case VIRTCHNL2_PROTO_HDR_GTP:
+ case VIRTCHNL2_PROTO_HDR_GTP_EH:
+ case VIRTCHNL2_PROTO_HDR_GTPCV2:
+ case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+ case VIRTCHNL2_PROTO_HDR_GTPU:
+ case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+ case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+ case VIRTCHNL2_PROTO_HDR_ECPRI:
+ case VIRTCHNL2_PROTO_HDR_VRRP:
+ case VIRTCHNL2_PROTO_HDR_OSPF:
+ case VIRTCHNL2_PROTO_HDR_TUN:
+ case VIRTCHNL2_PROTO_HDR_NVGRE:
+ case VIRTCHNL2_PROTO_HDR_VXLAN:
+ case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+ case VIRTCHNL2_PROTO_HDR_GENEVE:
+ case VIRTCHNL2_PROTO_HDR_NSH:
+ case VIRTCHNL2_PROTO_HDR_QUIC:
+ case VIRTCHNL2_PROTO_HDR_PFCP:
+ case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+ case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+ case VIRTCHNL2_PROTO_HDR_RTP:
+ case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+vc_buf_unlock:
+ mutex_unlock(&adapter->vc_buf_lock);
+ kfree(ptype_info);
+
+ return err;
+}
+
+/**
+ * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
+ * message
+ * @vport: virtual port data structure
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
+{
+ struct virtchnl2_loopback loopback;
+ int err;
+
+ loopback.vport_id = cpu_to_le32(vport->vport_id);
+ loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK,
+ sizeof(loopback), (u8 *)&loopback);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(vport->adapter, vport,
+ IDPF_VC_LOOPBACK_STATE,
+ IDPF_VC_LOOPBACK_STATE_ERR);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_find_ctlq - Given a type and id, find ctlq info
+ * @hw: hardware struct
+ * @type: type of ctrlq to find
+ * @id: ctlq id to find
+ *
+ * Returns pointer to found ctlq info struct, NULL otherwise.
+ */
+static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,
+ enum idpf_ctlq_type type, int id)
+{
+ struct idpf_ctlq_info *cq, *tmp;
+
+ list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
+ if (cq->q_id == id && cq->cq_type == type)
+ return cq;
+
+ return NULL;
+}
+
+/**
+ * idpf_init_dflt_mbx - Setup default mailbox parameters and make request
+ * @adapter: adapter info struct
+ *
+ * Returns 0 on success, negative otherwise
+ */
+int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
+{
+ struct idpf_ctlq_create_info ctlq_info[] = {
+ {
+ .type = IDPF_CTLQ_TYPE_MAILBOX_TX,
+ .id = IDPF_DFLT_MBX_ID,
+ .len = IDPF_DFLT_MBX_Q_LEN,
+ .buf_size = IDPF_CTLQ_MAX_BUF_LEN
+ },
+ {
+ .type = IDPF_CTLQ_TYPE_MAILBOX_RX,
+ .id = IDPF_DFLT_MBX_ID,
+ .len = IDPF_DFLT_MBX_Q_LEN,
+ .buf_size = IDPF_CTLQ_MAX_BUF_LEN
+ }
+ };
+ struct idpf_hw *hw = &adapter->hw;
+ int err;
+
+ adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
+
+ err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
+ if (err)
+ return err;
+
+ hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
+ IDPF_DFLT_MBX_ID);
+ hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
+ IDPF_DFLT_MBX_ID);
+
+ if (!hw->asq || !hw->arq) {
+ idpf_ctlq_deinit(hw);
+
+ return -ENOENT;
+ }
+
+ adapter->state = __IDPF_STARTUP;
+
+ return 0;
+}
+
+/**
+ * idpf_deinit_dflt_mbx - Free up ctlqs setup
+ * @adapter: Driver specific private data structure
+ */
+void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
+{
+ if (adapter->hw.arq && adapter->hw.asq) {
+ idpf_mb_clean(adapter);
+ idpf_ctlq_deinit(&adapter->hw);
+ }
+ adapter->hw.arq = NULL;
+ adapter->hw.asq = NULL;
+}
+
+/**
+ * idpf_vport_params_buf_rel - Release memory for MailBox resources
+ * @adapter: Driver specific private data structure
+ *
+ * Will release the memory used to hold the vport parameters received over the
+ * MailBox
+ */
+static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter)
+{
+ kfree(adapter->vport_params_recvd);
+ adapter->vport_params_recvd = NULL;
+ kfree(adapter->vport_params_reqd);
+ adapter->vport_params_reqd = NULL;
+ kfree(adapter->vport_ids);
+ adapter->vport_ids = NULL;
+}
+
+/**
+ * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources
+ * @adapter: Driver specific private data structure
+ *
+ * Will allocate memory to hold the vport parameters received over the MailBox.
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter)
+{
+ u16 num_max_vports = idpf_get_max_vports(adapter);
+
+ adapter->vport_params_reqd = kcalloc(num_max_vports,
+ sizeof(*adapter->vport_params_reqd),
+ GFP_KERNEL);
+ if (!adapter->vport_params_reqd)
+ return -ENOMEM;
+
+ adapter->vport_params_recvd = kcalloc(num_max_vports,
+ sizeof(*adapter->vport_params_recvd),
+ GFP_KERNEL);
+ if (!adapter->vport_params_recvd)
+ goto err_mem;
+
+ adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
+ if (!adapter->vport_ids)
+ goto err_mem;
+
+ if (adapter->vport_config)
+ return 0;
+
+ adapter->vport_config = kcalloc(num_max_vports,
+ sizeof(*adapter->vport_config),
+ GFP_KERNEL);
+ if (!adapter->vport_config)
+ goto err_mem;
+
+ return 0;
+
+err_mem:
+ idpf_vport_params_buf_rel(adapter);
+
+ return -ENOMEM;
+}
+
+/**
+ * idpf_vc_core_init - Initialize state machine and get driver specific
+ * resources
+ * @adapter: Driver specific private structure
+ *
+ * This function will initialize the state machine and request all necessary
+ * resources required by the device driver. Once the state machine is
+ * initialized, it allocates memory to store vport specific information and
+ * requests the required interrupts.
+ *
+ * Returns 0 on success, -EAGAIN if the function should be called again,
+ * otherwise negative on failure.
+ */
+int idpf_vc_core_init(struct idpf_adapter *adapter)
+{
+ int task_delay = 30;
+ u16 num_max_vports;
+ int err = 0;
+
+ while (adapter->state != __IDPF_INIT_SW) {
+ switch (adapter->state) {
+ case __IDPF_STARTUP:
+ if (idpf_send_ver_msg(adapter))
+ goto init_failed;
+ adapter->state = __IDPF_VER_CHECK;
+ goto restart;
+ case __IDPF_VER_CHECK:
+ err = idpf_recv_ver_msg(adapter);
+ if (err == -EIO) {
+ return err;
+ } else if (err == -EAGAIN) {
+ adapter->state = __IDPF_STARTUP;
+ goto restart;
+ } else if (err) {
+ goto init_failed;
+ }
+ if (idpf_send_get_caps_msg(adapter))
+ goto init_failed;
+ adapter->state = __IDPF_GET_CAPS;
+ goto restart;
+ case __IDPF_GET_CAPS:
+ if (idpf_recv_get_caps_msg(adapter))
+ goto init_failed;
+ adapter->state = __IDPF_INIT_SW;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
+ adapter->state);
+ goto init_failed;
+ }
+ break;
+restart:
+ /* Give enough time before proceeding further with
+		 * the state machine
+ */
+ msleep(task_delay);
+ }
+
+ pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
+ num_max_vports = idpf_get_max_vports(adapter);
+ adapter->max_vports = num_max_vports;
+ adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports),
+ GFP_KERNEL);
+ if (!adapter->vports)
+ return -ENOMEM;
+
+ if (!adapter->netdevs) {
+ adapter->netdevs = kcalloc(num_max_vports,
+ sizeof(struct net_device *),
+ GFP_KERNEL);
+ if (!adapter->netdevs) {
+ err = -ENOMEM;
+ goto err_netdev_alloc;
+ }
+ }
+
+ err = idpf_vport_params_buf_alloc(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
+ err);
+ goto err_netdev_alloc;
+ }
+
+ /* Start the mailbox task before requesting vectors. This will ensure
+ * vector information response from mailbox is handled
+ */
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
+ queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
+ msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
+
+ err = idpf_intr_req(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
+ err);
+ goto err_intr_req;
+ }
+
+ idpf_init_avail_queues(adapter);
+
+ /* Skew the delay for init tasks for each function based on fn number
+ * to prevent every function from making the same call simultaneously.
+ */
+ queue_delayed_work(adapter->init_wq, &adapter->init_task,
+ msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
+
+ goto no_err;
+
+err_intr_req:
+ cancel_delayed_work_sync(&adapter->serv_task);
+ idpf_vport_params_buf_rel(adapter);
+err_netdev_alloc:
+ kfree(adapter->vports);
+ adapter->vports = NULL;
+no_err:
+ return err;
+
+init_failed:
+ /* Don't retry if we're trying to go down, just bail. */
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ return err;
+
+ if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) {
+ dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");
+
+ return -EFAULT;
+ }
+	/* If we reached here, it is possible that the mailbox queue
+	 * initialization register writes did not take effect. Retry
+	 * initializing the mailbox.
+	 */
+ adapter->state = __IDPF_STARTUP;
+ idpf_deinit_dflt_mbx(adapter);
+ set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
+ msecs_to_jiffies(task_delay));
+
+ return -EAGAIN;
+}
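+
+/* State progression driven by the loop above (sketch):
+ *
+ *	__IDPF_STARTUP --send ver--> __IDPF_VER_CHECK --recv ver, send caps-->
+ *	__IDPF_GET_CAPS --recv caps--> __IDPF_INIT_SW
+ *
+ * A transient -EAGAIN from the version handshake drops back to
+ * __IDPF_STARTUP; any other failure funnels into init_failed.
+ */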
+
+/**
+ * idpf_vc_core_deinit - Device deinit routine
+ * @adapter: Driver specific private structure
+ */
+void idpf_vc_core_deinit(struct idpf_adapter *adapter)
+{
+ int i;
+
+ idpf_deinit_task(adapter);
+ idpf_intr_rel(adapter);
+	/* Set all bits as we don't know which vc_state the vhnl_wq is
+	 * waiting on, and wake up the virtchnl workqueue even if it is
+	 * waiting for a response, as we are going down
+	 */
+ for (i = 0; i < IDPF_VC_NBITS; i++)
+ set_bit(i, adapter->vc_state);
+ wake_up(&adapter->vchnl_wq);
+
+ cancel_delayed_work_sync(&adapter->serv_task);
+ cancel_delayed_work_sync(&adapter->mbx_task);
+
+ idpf_vport_params_buf_rel(adapter);
+
+ /* Clear all the bits */
+ for (i = 0; i < IDPF_VC_NBITS; i++)
+ clear_bit(i, adapter->vc_state);
+
+ kfree(adapter->vports);
+ adapter->vports = NULL;
+}
+
+/**
+ * idpf_vport_alloc_vec_indexes - Get relative vector indexes
+ * @vport: virtual port data struct
+ *
+ * This function requests the vector information required for the vport and
+ * stores the vector indexes received from the 'global vector distribution'
+ * in the vport's queue vectors array.
+ *
+ * Returns 0 on success, error on failure
+ */
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
+{
+ struct idpf_vector_info vec_info;
+ int num_alloc_vecs;
+
+ vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
+ vec_info.default_vport = vport->default_vport;
+ vec_info.index = vport->idx;
+
+ num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
+ vport->q_vector_idxs,
+ &vec_info);
+ if (num_alloc_vecs <= 0) {
+ dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
+ num_alloc_vecs);
+ return -EINVAL;
+ }
+
+ vport->num_q_vectors = num_alloc_vecs;
+
+ return 0;
+}
+
+/**
+ * idpf_vport_init - Initialize virtual port
+ * @vport: virtual port to be initialized
+ * @max_q: vport max queue info
+ *
+ * Will initialize vport with the info received through MB earlier
+ */
+void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_create_vport *vport_msg;
+ struct idpf_vport_config *vport_config;
+ u16 tx_itr[] = {2, 8, 64, 128, 256};
+ u16 rx_itr[] = {2, 8, 32, 96, 128};
+ struct idpf_rss_data *rss_data;
+ u16 idx = vport->idx;
+
+ vport_config = adapter->vport_config[idx];
+ rss_data = &vport_config->user_config.rss_data;
+ vport_msg = adapter->vport_params_recvd[idx];
+
+ vport_config->max_q.max_txq = max_q->max_txq;
+ vport_config->max_q.max_rxq = max_q->max_rxq;
+ vport_config->max_q.max_complq = max_q->max_complq;
+ vport_config->max_q.max_bufq = max_q->max_bufq;
+
+ vport->txq_model = le16_to_cpu(vport_msg->txq_model);
+ vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
+ vport->vport_type = le16_to_cpu(vport_msg->vport_type);
+ vport->vport_id = le32_to_cpu(vport_msg->vport_id);
+
+ rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
+ le16_to_cpu(vport_msg->rss_key_size));
+ rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
+
+ ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
+ vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD;
+
+ /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
+ memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
+ memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
+
+ idpf_vport_init_num_qs(vport, vport_msg);
+ idpf_vport_calc_num_q_desc(vport);
+ idpf_vport_calc_num_q_groups(vport);
+ idpf_vport_alloc_vec_indexes(vport);
+
+ vport->crc_enable = adapter->crc_enable;
+}
+
+/**
+ * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
+ * @adapter: adapter structure to get the mailbox vector id
+ * @vecids: Array of vector ids
+ * @num_vecids: number of vector ids
+ * @chunks: vector ids received over mailbox
+ *
+ * Will initialize the mailbox vector id, which is received from the
+ * get capabilities response, and the data queue vector ids with the ids
+ * received as mailbox parameters.
+ * Returns number of ids filled
+ */
+int idpf_get_vec_ids(struct idpf_adapter *adapter,
+ u16 *vecids, int num_vecids,
+ struct virtchnl2_vector_chunks *chunks)
+{
+ u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
+ int num_vecid_filled = 0;
+ int i, j;
+
+ vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
+ num_vecid_filled++;
+
+ for (j = 0; j < num_chunks; j++) {
+ struct virtchnl2_vector_chunk *chunk;
+ u16 start_vecid, num_vec;
+
+ chunk = &chunks->vchunks[j];
+ num_vec = le16_to_cpu(chunk->num_vectors);
+ start_vecid = le16_to_cpu(chunk->start_vector_id);
+
+ for (i = 0; i < num_vec; i++) {
+ if ((num_vecid_filled + i) < num_vecids) {
+ vecids[num_vecid_filled + i] = start_vecid;
+ start_vecid++;
+ } else {
+ break;
+ }
+ }
+ num_vecid_filled = num_vecid_filled + i;
+ }
+
+ return num_vecid_filled;
+}
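+
+/* Example, following the loop above: with a mailbox vector index of 0 and a
+ * single chunk { start_vector_id = 4, num_vectors = 3 }, vecids ends up as
+ * [0, 4, 5, 6] and the function returns 4.
+ */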
+
+/**
+ * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
+ * @qids: Array of queue ids
+ * @num_qids: number of queue ids
+ * @q_type: type of queue
+ * @chunks: queue ids received over mailbox
+ *
+ * Will initialize all queue ids with ids received as mailbox parameters
+ * Returns number of ids filled
+ */
+static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
+ struct virtchnl2_queue_reg_chunks *chunks)
+{
+ u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ u32 num_q_id_filled = 0, i;
+ u32 start_q_id, num_q;
+
+ while (num_chunks--) {
+ struct virtchnl2_queue_reg_chunk *chunk;
+
+ chunk = &chunks->chunks[num_chunks];
+ if (le32_to_cpu(chunk->type) != q_type)
+ continue;
+
+ num_q = le32_to_cpu(chunk->num_queues);
+ start_q_id = le32_to_cpu(chunk->start_queue_id);
+
+ for (i = 0; i < num_q; i++) {
+ if ((num_q_id_filled + i) < num_qids) {
+ qids[num_q_id_filled + i] = start_q_id;
+ start_q_id++;
+ } else {
+ break;
+ }
+ }
+ num_q_id_filled = num_q_id_filled + i;
+ }
+
+ return num_q_id_filled;
+}
+
+/**
+ * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
+ * @vport: virtual port for which the queue ids are initialized
+ * @qids: queue ids
+ * @num_qids: number of queue ids
+ * @q_type: type of queue
+ *
+ * Will initialize all queue ids with ids received as mailbox
+ * parameters. Returns number of queue ids initialized.
+ */
+static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ const u32 *qids,
+ int num_qids,
+ u32 q_type)
+{
+ struct idpf_queue *q;
+ int i, j, k = 0;
+
+ switch (q_type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) {
+ tx_qgrp->txqs[j]->q_id = qids[k];
+ tx_qgrp->txqs[j]->q_type =
+ VIRTCHNL2_QUEUE_TYPE_TX;
+ }
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u16 num_rxq;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+ q->q_id = qids[k];
+ q->q_type = VIRTCHNL2_QUEUE_TYPE_RX;
+ }
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ tx_qgrp->complq->q_id = qids[k];
+ tx_qgrp->complq->q_type =
+ VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u8 num_bufqs = vport->num_bufqs_per_qgrp;
+
+ for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ q->q_id = qids[k];
+ q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return k;
+}
+
+/**
+ * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
+ * @vport: virtual port for which the queue ids are initialized
+ *
+ * Will initialize all queue ids with ids received as mailbox parameters.
+ * Returns 0 on success, negative if not all the queue ids could be initialized.
+ */
+int idpf_vport_queue_ids_init(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport *vport_params;
+ struct virtchnl2_queue_reg_chunks *chunks;
+ struct idpf_vport_config *vport_config;
+ u16 vport_idx = vport->idx;
+ int num_ids, err = 0;
+ u16 q_type;
+ u32 *qids;
+
+ vport_config = vport->adapter->vport_config[vport_idx];
+ if (vport_config->req_qs_chunks) {
+ struct virtchnl2_add_queues *vc_aq =
+ (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
+ chunks = &vc_aq->chunks;
+ } else {
+ vport_params = vport->adapter->vport_params_recvd[vport_idx];
+ chunks = &vport_params->chunks;
+ }
+
+ qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
+ if (!qids)
+ return -ENOMEM;
+
+ num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
+ VIRTCHNL2_QUEUE_TYPE_TX,
+ chunks);
+ if (num_ids < vport->num_txq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+ num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ VIRTCHNL2_QUEUE_TYPE_TX);
+ if (num_ids < vport->num_txq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+
+ num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
+ VIRTCHNL2_QUEUE_TYPE_RX,
+ chunks);
+ if (num_ids < vport->num_rxq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+ num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ VIRTCHNL2_QUEUE_TYPE_RX);
+ if (num_ids < vport->num_rxq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ goto check_rxq;
+
+ q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
+ if (num_ids < vport->num_complq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+ num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
+ if (num_ids < vport->num_complq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+
+check_rxq:
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ goto mem_rel;
+
+ q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
+ if (num_ids < vport->num_bufq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+ num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
+ if (num_ids < vport->num_bufq)
+ err = -EINVAL;
+
+mem_rel:
+ kfree(qids);
+
+ return err;
+}
+
+/**
+ * idpf_vport_adjust_qs - Adjust to new requested queues
+ * @vport: virtual port data struct
+ *
+ * Renegotiate queues. Returns 0 on success, negative on failure.
+ */
+int idpf_vport_adjust_qs(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport vport_msg;
+ int err;
+
+ vport_msg.txq_model = cpu_to_le16(vport->txq_model);
+ vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
+ err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
+ NULL);
+ if (err)
+ return err;
+
+ idpf_vport_init_num_qs(vport, &vport_msg);
+ idpf_vport_calc_num_q_groups(vport);
+
+ return 0;
+}
+
+/**
+ * idpf_is_capability_ena - Default implementation of capability checking
+ * @adapter: Private data struct
+ * @all: whether all given flags must be set, or any one of them
+ * @field: caps field to check for flags
+ * @flag: flag(s) to check
+ *
+ * Returns true if the requested capabilities are supported: all of them when
+ * @all is set, at least one of them otherwise. Returns false otherwise.
+ */
+bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
+ enum idpf_cap_field field, u64 flag)
+{
+ u8 *caps = (u8 *)&adapter->caps;
+ u32 *cap_field;
+
+ if (!caps)
+ return false;
+
+ if (field == IDPF_BASE_CAPS)
+ return false;
+
+ cap_field = (u32 *)(caps + field);
+
+ if (all)
+ return (*cap_field & flag) == flag;
+ else
+ return !!(*cap_field & flag);
+}
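+
+/* Illustrative call (IDPF_CSUM_CAPS is a hypothetical caps field name here):
+ *
+ *	idpf_is_capability_ena(adapter, true, IDPF_CSUM_CAPS,
+ *			       VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |
+ *			       VIRTCHNL2_CAP_TX_CSUM_L3_IPV4);
+ *
+ * asks whether both checksum flags are set in that field, while all=false
+ * would ask whether at least one of them is.
+ */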
+
+/**
+ * idpf_get_vport_id - Get vport id
+ * @vport: virtual port structure
+ *
+ * Return vport id from the adapter persistent data
+ */
+u32 idpf_get_vport_id(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport *vport_msg;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+
+ return le32_to_cpu(vport_msg->vport_id);
+}
+
+/**
+ * idpf_add_del_mac_filters - Add/del mac filters
+ * @vport: Virtual port data structure
+ * @np: Netdev private structure
+ * @add: Add or delete flag
+ * @async: Don't wait for return message
+ *
+ * Returns 0 on success, error on failure.
+ **/
+int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ bool add, bool async)
+{
+ struct virtchnl2_mac_addr_list *ma_list = NULL;
+ struct idpf_adapter *adapter = np->adapter;
+ struct idpf_vport_config *vport_config;
+ enum idpf_vport_config_flags mac_flag;
+ struct pci_dev *pdev = adapter->pdev;
+ enum idpf_vport_vc_state vc, vc_err;
+ struct virtchnl2_mac_addr *mac_addr;
+ struct idpf_mac_filter *f, *tmp;
+ u32 num_msgs, total_filters = 0;
+ int i = 0, k, err = 0;
+ u32 vop;
+
+ vport_config = adapter->vport_config[np->vport_idx];
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ /* Find the number of newly added filters */
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
+ list) {
+ if (add && f->add)
+ total_filters++;
+ else if (!add && f->remove)
+ total_filters++;
+ }
+
+ if (!total_filters) {
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return 0;
+ }
+
+	/* Fill all the new filters into the virtchnl message */
+ mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
+ GFP_ATOMIC);
+ if (!mac_addr) {
+ err = -ENOMEM;
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+ goto error;
+ }
+
+ list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list,
+ list) {
+ if (add && f->add) {
+ ether_addr_copy(mac_addr[i].addr, f->macaddr);
+ i++;
+ f->add = false;
+ if (i == total_filters)
+ break;
+ }
+ if (!add && f->remove) {
+ ether_addr_copy(mac_addr[i].addr, f->macaddr);
+ i++;
+ f->remove = false;
+ if (i == total_filters)
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ if (add) {
+ vop = VIRTCHNL2_OP_ADD_MAC_ADDR;
+ vc = IDPF_VC_ADD_MAC_ADDR;
+ vc_err = IDPF_VC_ADD_MAC_ADDR_ERR;
+ mac_flag = IDPF_VPORT_ADD_MAC_REQ;
+ } else {
+ vop = VIRTCHNL2_OP_DEL_MAC_ADDR;
+ vc = IDPF_VC_DEL_MAC_ADDR;
+ vc_err = IDPF_VC_DEL_MAC_ADDR_ERR;
+ mac_flag = IDPF_VPORT_DEL_MAC_REQ;
+ }
+
+ /* Chunk up the filters into multiple messages to avoid
+ * sending a control queue message buffer that is too large
+ */
+ num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
+
+ if (!async)
+ mutex_lock(&vport->vc_buf_lock);
+
+ for (i = 0, k = 0; i < num_msgs; i++) {
+ u32 entries_size, buf_size, num_entries;
+
+ num_entries = min_t(u32, total_filters,
+ IDPF_NUM_FILTERS_PER_MSG);
+ entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
+ buf_size = struct_size(ma_list, mac_addr_list, num_entries);
+
+ if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
+ kfree(ma_list);
+ ma_list = kzalloc(buf_size, GFP_ATOMIC);
+ if (!ma_list) {
+ err = -ENOMEM;
+ goto list_prep_error;
+ }
+ } else {
+ memset(ma_list, 0, buf_size);
+ }
+
+ ma_list->vport_id = cpu_to_le32(np->vport_id);
+ ma_list->num_mac_addr = cpu_to_le16(num_entries);
+ memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
+
+ if (async)
+ set_bit(mac_flag, vport_config->flags);
+
+ err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list);
+ if (err)
+ goto mbx_error;
+
+ if (!async) {
+ err = idpf_wait_for_event(adapter, vport, vc, vc_err);
+ if (err)
+ goto mbx_error;
+ }
+
+ k += num_entries;
+ total_filters -= num_entries;
+ }
+
+mbx_error:
+ if (!async)
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(ma_list);
+list_prep_error:
+ kfree(mac_addr);
+error:
+ if (err)
+		dev_err(&pdev->dev, "Failed to add or del mac filters %d\n", err);
+
+ return err;
+}
+
+/**
+ * idpf_set_promiscuous - set promiscuous and send message to mailbox
+ * @adapter: Driver specific private structure
+ * @config_data: Vport specific config data
+ * @vport_id: Vport identifier
+ *
+ * Request to enable promiscuous mode for the vport. Message is sent
+ * asynchronously and won't wait for response. Returns 0 on success, negative
+ * on failure.
+ */
+int idpf_set_promiscuous(struct idpf_adapter *adapter,
+ struct idpf_vport_user_config_data *config_data,
+ u32 vport_id)
+{
+ struct virtchnl2_promisc_info vpi;
+ u16 flags = 0;
+
+ if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
+ flags |= VIRTCHNL2_UNICAST_PROMISC;
+ if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
+ flags |= VIRTCHNL2_MULTICAST_PROMISC;
+
+ vpi.vport_id = cpu_to_le32(vport_id);
+ vpi.flags = cpu_to_le16(flags);
+
+	return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE,
+				sizeof(struct virtchnl2_promisc_info),
+				(u8 *)&vpi);
+}
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
new file mode 100644
index 000000000000..07e72c72d156
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -0,0 +1,1273 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _VIRTCHNL2_H_
+#define _VIRTCHNL2_H_
+
+/* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or
+ * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
+ * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
+ *
+ * The PF/VF uses the virtchnl2 interface defined in this header file to
+ * communicate with the device Control Plane (CP). The driver and the CP may
+ * run on different platforms with different endianness. To avoid byte order
+ * discrepancies, all the structures in this header follow the little-endian
+ * format.
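+ *
+ * Every multi-byte field therefore crosses the driver boundary through byte
+ * order helpers, as the idpf code above does throughout, e.g.:
+ *
+ *	aq.vport_id = cpu_to_le32(vport->vport_id);	(CPU -> wire)
+ *	num = le16_to_cpu(chunks->num_chunks);		(wire -> CPU)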
+ *
+ * This is an interface definition file where existing enums and their values
+ * must remain unchanged over time, so we specify explicit values for all enums.
+ */
+
+#include "virtchnl2_lan_desc.h"
+
+/* This macro is used to generate compilation errors if a structure
+ * is not exactly the correct length.
+ */
+#define VIRTCHNL2_CHECK_STRUCT_LEN(n, X) \
+ static_assert((n) == sizeof(struct X))
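+
+/* Illustrative use (virtchnl2_example is hypothetical):
+ *
+ *	struct virtchnl2_example {
+ *		__le32 a;
+ *		__le32 b;
+ *	};
+ *	VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_example);
+ *
+ * The static_assert() fires at compile time if the wire-format size ever
+ * drifts from the expected 8 bytes.
+ */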
+
+/* A new major set of opcodes is introduced, leaving room for old misc
+ * opcodes to be added in the future. These opcodes may only be used if
+ * both the PF and VF have successfully negotiated the VIRTCHNL version
+ * as 2.0 during the VIRTCHNL2_OP_VERSION exchange.
+ */
+enum virtchnl2_op {
+ VIRTCHNL2_OP_UNKNOWN = 0,
+ VIRTCHNL2_OP_VERSION = 1,
+ VIRTCHNL2_OP_GET_CAPS = 500,
+ VIRTCHNL2_OP_CREATE_VPORT = 501,
+ VIRTCHNL2_OP_DESTROY_VPORT = 502,
+ VIRTCHNL2_OP_ENABLE_VPORT = 503,
+ VIRTCHNL2_OP_DISABLE_VPORT = 504,
+ VIRTCHNL2_OP_CONFIG_TX_QUEUES = 505,
+ VIRTCHNL2_OP_CONFIG_RX_QUEUES = 506,
+ VIRTCHNL2_OP_ENABLE_QUEUES = 507,
+ VIRTCHNL2_OP_DISABLE_QUEUES = 508,
+ VIRTCHNL2_OP_ADD_QUEUES = 509,
+ VIRTCHNL2_OP_DEL_QUEUES = 510,
+ VIRTCHNL2_OP_MAP_QUEUE_VECTOR = 511,
+ VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR = 512,
+ VIRTCHNL2_OP_GET_RSS_KEY = 513,
+ VIRTCHNL2_OP_SET_RSS_KEY = 514,
+ VIRTCHNL2_OP_GET_RSS_LUT = 515,
+ VIRTCHNL2_OP_SET_RSS_LUT = 516,
+ VIRTCHNL2_OP_GET_RSS_HASH = 517,
+ VIRTCHNL2_OP_SET_RSS_HASH = 518,
+ VIRTCHNL2_OP_SET_SRIOV_VFS = 519,
+ VIRTCHNL2_OP_ALLOC_VECTORS = 520,
+ VIRTCHNL2_OP_DEALLOC_VECTORS = 521,
+ VIRTCHNL2_OP_EVENT = 522,
+ VIRTCHNL2_OP_GET_STATS = 523,
+ VIRTCHNL2_OP_RESET_VF = 524,
+ VIRTCHNL2_OP_GET_EDT_CAPS = 525,
+ VIRTCHNL2_OP_GET_PTYPE_INFO = 526,
+ /* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
+ * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
+ * Opcodes 529, 530, 531, 532 and 533 are reserved.
+ */
+ VIRTCHNL2_OP_LOOPBACK = 534,
+ VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
+ VIRTCHNL2_OP_DEL_MAC_ADDR = 536,
+ VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537,
+};
+
+/**
+ * enum virtchnl2_vport_type - Type of virtual port.
+ * @VIRTCHNL2_VPORT_TYPE_DEFAULT: Default virtual port type.
+ */
+enum virtchnl2_vport_type {
+ VIRTCHNL2_VPORT_TYPE_DEFAULT = 0,
+};
+
+/**
+ * enum virtchnl2_queue_model - Type of queue model.
+ * @VIRTCHNL2_QUEUE_MODEL_SINGLE: Single queue model.
+ * @VIRTCHNL2_QUEUE_MODEL_SPLIT: Split queue model.
+ *
+ * In the single queue model, the same transmit descriptor queue is used by
+ * software to post descriptors to hardware and by hardware to post completed
+ * descriptors to software.
+ * Likewise, the same receive descriptor queue is used by hardware to post
+ * completions to software and by software to post buffers to hardware.
+ *
+ * In the split queue model, hardware uses transmit completion queues to post
+ * descriptor/buffer completions to software, while software uses transmit
+ * descriptor queues to post descriptors to hardware.
+ * Likewise, hardware posts descriptor completions to the receive descriptor
+ * queue, while software uses receive buffer queues to post buffers to hardware.
+ */
+enum virtchnl2_queue_model {
+ VIRTCHNL2_QUEUE_MODEL_SINGLE = 0,
+ VIRTCHNL2_QUEUE_MODEL_SPLIT = 1,
+};
+
+/* Checksum offload capability flags */
+enum virtchnl2_cap_txrx_csum {
+ VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 = BIT(0),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP = BIT(1),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP = BIT(2),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP = BIT(3),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP = BIT(4),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP = BIT(5),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP = BIT(6),
+ VIRTCHNL2_CAP_TX_CSUM_GENERIC = BIT(7),
+ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 = BIT(8),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP = BIT(9),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP = BIT(10),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP = BIT(11),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP = BIT(12),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP = BIT(13),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP = BIT(14),
+ VIRTCHNL2_CAP_RX_CSUM_GENERIC = BIT(15),
+ VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL = BIT(16),
+ VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL = BIT(17),
+ VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL = BIT(18),
+ VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL = BIT(19),
+ VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL = BIT(20),
+ VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL = BIT(21),
+ VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL = BIT(22),
+ VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL = BIT(23),
+};
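+
+/* Capability words are little-endian on the wire; a consumer would
+ * typically convert before testing bits, e.g. (illustrative sketch,
+ * caps being a received struct virtchnl2_get_capabilities):
+ *
+ *	if (le32_to_cpu(caps->csum_caps) & VIRTCHNL2_CAP_RX_CSUM_L3_IPV4)
+ *		netdev->features |= NETIF_F_RXCSUM;
+ */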
+
+/* Segmentation offload capability flags */
+enum virtchnl2_cap_seg {
+ VIRTCHNL2_CAP_SEG_IPV4_TCP = BIT(0),
+ VIRTCHNL2_CAP_SEG_IPV4_UDP = BIT(1),
+ VIRTCHNL2_CAP_SEG_IPV4_SCTP = BIT(2),
+ VIRTCHNL2_CAP_SEG_IPV6_TCP = BIT(3),
+ VIRTCHNL2_CAP_SEG_IPV6_UDP = BIT(4),
+ VIRTCHNL2_CAP_SEG_IPV6_SCTP = BIT(5),
+ VIRTCHNL2_CAP_SEG_GENERIC = BIT(6),
+ VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL = BIT(7),
+ VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8),
+};
+
+/* Receive Side Scaling Flow type capability flags */
+enum virtchnl2_cap_rss {
+ VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0),
+ VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1),
+ VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2),
+ VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3),
+ VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4),
+ VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5),
+ VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6),
+ VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7),
+ VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8),
+ VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9),
+ VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10),
+ VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11),
+ VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12),
+ VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13),
+};
+
+/* Header split capability flags */
+enum virtchnl2_cap_rx_hsplit_at {
+ /* for prepended metadata */
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L2 = BIT(0),
+ /* all VLANs go into header buffer */
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L3 = BIT(1),
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 = BIT(2),
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6 = BIT(3),
+};
+
+/* Receive Side Coalescing offload capability flags */
+enum virtchnl2_cap_rsc {
+ VIRTCHNL2_CAP_RSC_IPV4_TCP = BIT(0),
+ VIRTCHNL2_CAP_RSC_IPV4_SCTP = BIT(1),
+ VIRTCHNL2_CAP_RSC_IPV6_TCP = BIT(2),
+ VIRTCHNL2_CAP_RSC_IPV6_SCTP = BIT(3),
+};
+
+/* Other capability flags */
+enum virtchnl2_cap_other {
+ VIRTCHNL2_CAP_RDMA = BIT_ULL(0),
+ VIRTCHNL2_CAP_SRIOV = BIT_ULL(1),
+ VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2),
+ VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3),
+ /* Queue based scheduling using split queue model */
+ VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4),
+ VIRTCHNL2_CAP_CRC = BIT_ULL(5),
+ VIRTCHNL2_CAP_ADQ = BIT_ULL(6),
+ VIRTCHNL2_CAP_WB_ON_ITR = BIT_ULL(7),
+ VIRTCHNL2_CAP_PROMISC = BIT_ULL(8),
+ VIRTCHNL2_CAP_LINK_SPEED = BIT_ULL(9),
+ VIRTCHNL2_CAP_INLINE_IPSEC = BIT_ULL(10),
+ VIRTCHNL2_CAP_LARGE_NUM_QUEUES = BIT_ULL(11),
+ VIRTCHNL2_CAP_VLAN = BIT_ULL(12),
+ VIRTCHNL2_CAP_PTP = BIT_ULL(13),
+ /* EDT: Earliest Departure Time capability used for Timing Wheel */
+ VIRTCHNL2_CAP_EDT = BIT_ULL(14),
+ VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15),
+ VIRTCHNL2_CAP_FDIR = BIT_ULL(16),
+ VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17),
+ VIRTCHNL2_CAP_PTYPE = BIT_ULL(18),
+ VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19),
+ /* Other capability 20 is reserved */
+
+ /* this must be the last capability */
+ VIRTCHNL2_CAP_OEM = BIT_ULL(63),
+};
+
+/* Underlying device type */
+enum virtchnl2_device_type {
+ VIRTCHNL2_MEV_DEVICE = 0,
+};
+
+/**
+ * enum virtchnl2_txq_sched_mode - Transmit Queue Scheduling Modes.
+ * @VIRTCHNL2_TXQ_SCHED_MODE_QUEUE: Queue mode is the legacy mode i.e. inorder
+ * completions where descriptors and buffers
+ * are completed at the same time.
+ * @VIRTCHNL2_TXQ_SCHED_MODE_FLOW: Flow scheduling mode allows for out of order
+ * packet processing where descriptors are
+ * cleaned in order, but buffers can be
+ * completed out of order.
+ */
+enum virtchnl2_txq_sched_mode {
+ VIRTCHNL2_TXQ_SCHED_MODE_QUEUE = 0,
+ VIRTCHNL2_TXQ_SCHED_MODE_FLOW = 1,
+};
+
+/**
+ * enum virtchnl2_rxq_flags - Receive Queue Feature flags.
+ * @VIRTCHNL2_RXQ_RSC: Rx queue RSC flag.
+ * @VIRTCHNL2_RXQ_HDR_SPLIT: Rx queue header split flag.
+ * @VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK: When set, packet descriptors are flushed
+ * by hardware immediately after processing
+ * each packet.
+ * @VIRTCHNL2_RX_DESC_SIZE_16BYTE: Rx queue 16 byte descriptor size.
+ * @VIRTCHNL2_RX_DESC_SIZE_32BYTE: Rx queue 32 byte descriptor size.
+ */
+enum virtchnl2_rxq_flags {
+ VIRTCHNL2_RXQ_RSC = BIT(0),
+ VIRTCHNL2_RXQ_HDR_SPLIT = BIT(1),
+ VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK = BIT(2),
+ VIRTCHNL2_RX_DESC_SIZE_16BYTE = BIT(3),
+ VIRTCHNL2_RX_DESC_SIZE_32BYTE = BIT(4),
+};
+
+/* Type of RSS algorithm */
+enum virtchnl2_rss_alg {
+ VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL2_RSS_ALG_R_ASYMMETRIC = 1,
+ VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+/* Type of event */
+enum virtchnl2_event_codes {
+ VIRTCHNL2_EVENT_UNKNOWN = 0,
+ VIRTCHNL2_EVENT_LINK_CHANGE = 1,
+ /* Event type 2, 3 are reserved */
+};
+
+/* Transmit and Receive queue types are valid in legacy as well as split queue
+ * models. With Split Queue model, 2 additional types are introduced -
+ * TX_COMPLETION and RX_BUFFER. In split queue model, receive corresponds to
+ * the queue where hardware posts completions.
+ */
+enum virtchnl2_queue_type {
+ VIRTCHNL2_QUEUE_TYPE_TX = 0,
+ VIRTCHNL2_QUEUE_TYPE_RX = 1,
+ VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION = 2,
+ VIRTCHNL2_QUEUE_TYPE_RX_BUFFER = 3,
+ VIRTCHNL2_QUEUE_TYPE_CONFIG_TX = 4,
+ VIRTCHNL2_QUEUE_TYPE_CONFIG_RX = 5,
+ /* Queue types 6, 7, 8, 9 are reserved */
+ VIRTCHNL2_QUEUE_TYPE_MBX_TX = 10,
+ VIRTCHNL2_QUEUE_TYPE_MBX_RX = 11,
+};
+
+/* Interrupt throttling rate index */
+enum virtchnl2_itr_idx {
+ VIRTCHNL2_ITR_IDX_0 = 0,
+ VIRTCHNL2_ITR_IDX_1 = 1,
+};
+
+/**
+ * enum virtchnl2_mac_addr_type - MAC address types.
+ * @VIRTCHNL2_MAC_ADDR_PRIMARY: PF/VF driver should set this type for the
+ * primary/device unicast MAC address filter for
+ * VIRTCHNL2_OP_ADD_MAC_ADDR and
+ * VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the
+ * underlying control plane function to accurately
+ * track the MAC address and for VM/function reset.
+ *
+ * @VIRTCHNL2_MAC_ADDR_EXTRA: PF/VF driver should set this type for any extra
+ * unicast and/or multicast filters that are being
+ * added/deleted via VIRTCHNL2_OP_ADD_MAC_ADDR or
+ * VIRTCHNL2_OP_DEL_MAC_ADDR.
+ */
+enum virtchnl2_mac_addr_type {
+ VIRTCHNL2_MAC_ADDR_PRIMARY = 1,
+ VIRTCHNL2_MAC_ADDR_EXTRA = 2,
+};
+
+/* Flags used for promiscuous mode */
+enum virtchnl2_promisc_flags {
+ VIRTCHNL2_UNICAST_PROMISC = BIT(0),
+ VIRTCHNL2_MULTICAST_PROMISC = BIT(1),
+};
+
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates or is encapsulated using/by
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+enum virtchnl2_proto_hdr_type {
+ /* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_ANY = 0,
+ VIRTCHNL2_PROTO_HDR_PRE_MAC = 1,
+ /* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_MAC = 2,
+ VIRTCHNL2_PROTO_HDR_POST_MAC = 3,
+ VIRTCHNL2_PROTO_HDR_ETHERTYPE = 4,
+ VIRTCHNL2_PROTO_HDR_VLAN = 5,
+ VIRTCHNL2_PROTO_HDR_SVLAN = 6,
+ VIRTCHNL2_PROTO_HDR_CVLAN = 7,
+ VIRTCHNL2_PROTO_HDR_MPLS = 8,
+ VIRTCHNL2_PROTO_HDR_UMPLS = 9,
+ VIRTCHNL2_PROTO_HDR_MMPLS = 10,
+ VIRTCHNL2_PROTO_HDR_PTP = 11,
+ VIRTCHNL2_PROTO_HDR_CTRL = 12,
+ VIRTCHNL2_PROTO_HDR_LLDP = 13,
+ VIRTCHNL2_PROTO_HDR_ARP = 14,
+ VIRTCHNL2_PROTO_HDR_ECP = 15,
+ VIRTCHNL2_PROTO_HDR_EAPOL = 16,
+ VIRTCHNL2_PROTO_HDR_PPPOD = 17,
+ VIRTCHNL2_PROTO_HDR_PPPOE = 18,
+ /* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV4 = 19,
+ /* IPv4 and IPv6 Fragment header types are only associated with
+ * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,
+ * and cannot be used independently.
+ */
+ /* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV4_FRAG = 20,
+ /* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV6 = 21,
+ /* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV6_FRAG = 22,
+ VIRTCHNL2_PROTO_HDR_IPV6_EH = 23,
+ /* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_UDP = 24,
+ /* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_TCP = 25,
+ /* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_SCTP = 26,
+ /* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_ICMP = 27,
+ /* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_ICMPV6 = 28,
+ VIRTCHNL2_PROTO_HDR_IGMP = 29,
+ VIRTCHNL2_PROTO_HDR_AH = 30,
+ VIRTCHNL2_PROTO_HDR_ESP = 31,
+ VIRTCHNL2_PROTO_HDR_IKE = 32,
+ VIRTCHNL2_PROTO_HDR_NATT_KEEP = 33,
+ /* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_PAY = 34,
+ VIRTCHNL2_PROTO_HDR_L2TPV2 = 35,
+ VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL = 36,
+ VIRTCHNL2_PROTO_HDR_L2TPV3 = 37,
+ VIRTCHNL2_PROTO_HDR_GTP = 38,
+ VIRTCHNL2_PROTO_HDR_GTP_EH = 39,
+ VIRTCHNL2_PROTO_HDR_GTPCV2 = 40,
+ VIRTCHNL2_PROTO_HDR_GTPC_TEID = 41,
+ VIRTCHNL2_PROTO_HDR_GTPU = 42,
+ VIRTCHNL2_PROTO_HDR_GTPU_UL = 43,
+ VIRTCHNL2_PROTO_HDR_GTPU_DL = 44,
+ VIRTCHNL2_PROTO_HDR_ECPRI = 45,
+ VIRTCHNL2_PROTO_HDR_VRRP = 46,
+ VIRTCHNL2_PROTO_HDR_OSPF = 47,
+ /* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_TUN = 48,
+ VIRTCHNL2_PROTO_HDR_GRE = 49,
+ VIRTCHNL2_PROTO_HDR_NVGRE = 50,
+ VIRTCHNL2_PROTO_HDR_VXLAN = 51,
+ VIRTCHNL2_PROTO_HDR_VXLAN_GPE = 52,
+ VIRTCHNL2_PROTO_HDR_GENEVE = 53,
+ VIRTCHNL2_PROTO_HDR_NSH = 54,
+ VIRTCHNL2_PROTO_HDR_QUIC = 55,
+ VIRTCHNL2_PROTO_HDR_PFCP = 56,
+ VIRTCHNL2_PROTO_HDR_PFCP_NODE = 57,
+ VIRTCHNL2_PROTO_HDR_PFCP_SESSION = 58,
+ VIRTCHNL2_PROTO_HDR_RTP = 59,
+ VIRTCHNL2_PROTO_HDR_ROCE = 60,
+ VIRTCHNL2_PROTO_HDR_ROCEV1 = 61,
+ VIRTCHNL2_PROTO_HDR_ROCEV2 = 62,
+ /* Protocol ids up to 32767 are reserved.
+ * 32768 - 65534 are used for user defined protocol ids.
+ * VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id.
+ */
+ VIRTCHNL2_PROTO_HDR_NO_PROTO = 65535,
+};
+
+enum virtchnl2_version {
+ VIRTCHNL2_VERSION_MINOR_0 = 0,
+ VIRTCHNL2_VERSION_MAJOR_2 = 2,
+};
+
+/**
+ * struct virtchnl2_edt_caps - Get EDT granularity and time horizon.
+ * @tstamp_granularity_ns: Timestamp granularity in nanoseconds.
+ * @time_horizon_ns: Total time window in nanoseconds.
+ *
+ * Associated with VIRTCHNL2_OP_GET_EDT_CAPS.
+ */
+struct virtchnl2_edt_caps {
+ __le64 tstamp_granularity_ns;
+ __le64 time_horizon_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_edt_caps);
+
+/**
+ * struct virtchnl2_version_info - Version information.
+ * @major: Major version.
+ * @minor: Minor version.
+ *
+ * PF/VF posts its version number to the CP. CP responds with its version number
+ * in the same format, along with a return code.
+ * If there is a major version mismatch, then the PF/VF cannot operate.
+ * If there is a minor version mismatch, then the PF/VF can operate but should
+ * add a warning to the system log.
+ *
+ * This version opcode MUST always be specified as == 1, regardless of other
+ * changes in the API. The CP must always respond to this message without
+ * error regardless of version mismatch.
+ *
+ * Associated with VIRTCHNL2_OP_VERSION.
+ */
+struct virtchnl2_version_info {
+ __le32 major;
+ __le32 minor;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
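+
+/* Negotiation sketch (illustrative; send_to_cp() is a hypothetical
+ * transport helper, the real send path is driver specific):
+ *
+ *	struct virtchnl2_version_info ver = {
+ *		.major = cpu_to_le32(VIRTCHNL2_VERSION_MAJOR_2),
+ *		.minor = cpu_to_le32(VIRTCHNL2_VERSION_MINOR_0),
+ *	};
+ *
+ *	err = send_to_cp(VIRTCHNL2_OP_VERSION, &ver, sizeof(ver));
+ */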
+
+/**
+ * struct virtchnl2_get_capabilities - Capabilities info.
+ * @csum_caps: See enum virtchnl2_cap_txrx_csum.
+ * @seg_caps: See enum virtchnl2_cap_seg.
+ * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
+ * @rsc_caps: See enum virtchnl2_cap_rsc.
+ * @rss_caps: See enum virtchnl2_cap_rss.
+ * @other_caps: See enum virtchnl2_cap_other.
+ * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
+ * provided by CP.
+ * @mailbox_vector_id: Mailbox vector id.
+ * @num_allocated_vectors: Maximum number of allocated vectors for the device.
+ * @max_rx_q: Maximum number of supported Rx queues.
+ * @max_tx_q: Maximum number of supported Tx queues.
+ * @max_rx_bufq: Maximum number of supported buffer queues.
+ * @max_tx_complq: Maximum number of supported completion queues.
+ * @max_sriov_vfs: The PF sends the maximum VFs it is requesting. The CP
+ * responds with the maximum VFs granted.
+ * @max_vports: Maximum number of vports that can be supported.
+ * @default_num_vports: Default number of vports driver should allocate on load.
+ * @max_tx_hdr_size: Max header length hardware can parse/checksum, in bytes.
+ * @max_sg_bufs_per_tx_pkt: Max number of scatter gather buffers that can be
+ * sent per transmit packet without needing to be
+ * linearized.
+ * @pad: Padding.
+ * @reserved: Reserved.
+ * @device_type: See enum virtchnl2_device_type.
+ * @min_sso_packet_len: Min packet length supported by device for single
+ * segment offload.
+ * @max_hdr_buf_per_lso: Max number of header buffers that can be used for
+ * an LSO.
+ * @pad1: Padding for future extensions.
+ *
+ * Dataplane driver sends this message to CP to negotiate capabilities and
+ * provides a virtchnl2_get_capabilities structure with its desired
+ * capabilities, max_sriov_vfs and num_allocated_vectors.
+ * CP responds with a virtchnl2_get_capabilities structure updated
+ * with allowed capabilities and the other fields as below.
+ * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs
+ * that can be created by this PF. For any other value 'n', CP responds
+ * with max_sriov_vfs set to min(n, x) where x is the max number of VFs
+ * allowed by CP's policy. max_sriov_vfs is not applicable for VFs.
+ * If the dataplane driver sets num_allocated_vectors as 0, CP will respond
+ * with 1, which is the default vector associated with the default mailbox.
+ * For any other value 'n', CP responds with a value <= n based on the CP's
+ * policy of max number of vectors for a PF.
+ * CP will respond with the vector ID of the mailbox allocated to the PF in
+ * mailbox_vector_id and the number of itr index registers in itr_idx_map.
+ * It also responds with the default number of vports that the dataplane
+ * driver should come up with in default_num_vports and the maximum number
+ * of vports that can be supported in max_vports.
+ *
+ * Associated with VIRTCHNL2_OP_GET_CAPS.
+ */
+struct virtchnl2_get_capabilities {
+ __le32 csum_caps;
+ __le32 seg_caps;
+ __le32 hsplit_caps;
+ __le32 rsc_caps;
+ __le64 rss_caps;
+ __le64 other_caps;
+ __le32 mailbox_dyn_ctl;
+ __le16 mailbox_vector_id;
+ __le16 num_allocated_vectors;
+ __le16 max_rx_q;
+ __le16 max_tx_q;
+ __le16 max_rx_bufq;
+ __le16 max_tx_complq;
+ __le16 max_sriov_vfs;
+ __le16 max_vports;
+ __le16 default_num_vports;
+ __le16 max_tx_hdr_size;
+ u8 max_sg_bufs_per_tx_pkt;
+ u8 pad[3];
+ u8 reserved[4];
+ __le32 device_type;
+ u8 min_sso_packet_len;
+ u8 max_hdr_buf_per_lso;
+ u8 pad1[10];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
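+
+/* Request-side sketch (illustrative): a function that wants the CP
+ * defaults can leave max_sriov_vfs and num_allocated_vectors at 0 and
+ * only advertise the capabilities it desires:
+ *
+ *	struct virtchnl2_get_capabilities caps = {
+ *		.csum_caps = cpu_to_le32(VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |
+ *					 VIRTCHNL2_CAP_TX_CSUM_L3_IPV4),
+ *		.other_caps = cpu_to_le64(VIRTCHNL2_CAP_MACFILTER),
+ *	};
+ */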
+
+/**
+ * struct virtchnl2_queue_reg_chunk - Single queue chunk.
+ * @type: See enum virtchnl2_queue_type.
+ * @start_queue_id: Start Queue ID.
+ * @num_queues: Number of queues in the chunk.
+ * @pad: Padding.
+ * @qtail_reg_start: Queue tail register offset.
+ * @qtail_reg_spacing: Queue tail register spacing.
+ * @pad1: Padding for future extensions.
+ */
+struct virtchnl2_queue_reg_chunk {
+ __le32 type;
+ __le32 start_queue_id;
+ __le32 num_queues;
+ __le32 pad;
+ __le64 qtail_reg_start;
+ __le32 qtail_reg_spacing;
+ u8 pad1[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
+
+/**
+ * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous
+ * queues.
+ * @num_chunks: Number of chunks.
+ * @pad: Padding.
+ * @chunks: Chunks of queue info.
+ */
+struct virtchnl2_queue_reg_chunks {
+ __le16 num_chunks;
+ u8 pad[6];
+ struct virtchnl2_queue_reg_chunk chunks[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
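+
+/* Walking the trailing flexible array (illustrative sketch, chunks being
+ * a received struct virtchnl2_queue_reg_chunks):
+ *
+ *	u16 i;
+ *
+ *	for (i = 0; i < le16_to_cpu(chunks->num_chunks); i++) {
+ *		struct virtchnl2_queue_reg_chunk *c = &chunks->chunks[i];
+ *
+ *		pr_info("queues %u..%u\n",
+ *			le32_to_cpu(c->start_queue_id),
+ *			le32_to_cpu(c->start_queue_id) +
+ *			le32_to_cpu(c->num_queues) - 1);
+ *	}
+ */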
+
+/**
+ * struct virtchnl2_create_vport - Create vport config info.
+ * @vport_type: See enum virtchnl2_vport_type.
+ * @txq_model: See virtchnl2_queue_model.
+ * @rxq_model: See virtchnl2_queue_model.
+ * @num_tx_q: Number of Tx queues.
+ * @num_tx_complq: Valid only if txq_model is split queue.
+ * @num_rx_q: Number of Rx queues.
+ * @num_rx_bufq: Valid only if rxq_model is split queue.
+ * @default_rx_q: Relative receive queue index to be used as default.
+ * @vport_index: Used to align PF and CP in case of default multiple vports;
+ * it is filled by the PF and the CP returns the same value, to
+ * enable the driver to support multiple asynchronous parallel
+ * CREATE_VPORT requests and associate a response with a
+ * specific request.
+ * @max_mtu: Max MTU. CP populates this field on response.
+ * @vport_id: Vport id. CP populates this field on response.
+ * @default_mac_addr: Default MAC address.
+ * @pad: Padding.
+ * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
+ * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
+ * @pad1: Padding.
+ * @rss_algorithm: RSS algorithm.
+ * @rss_key_size: RSS key size.
+ * @rss_lut_size: RSS LUT size.
+ * @rx_split_pos: See enum virtchnl2_cap_rx_hsplit_at.
+ * @pad2: Padding.
+ * @chunks: Chunks of contiguous queues.
+ *
+ * PF sends this message to CP to create a vport by filling in required
+ * fields of virtchnl2_create_vport structure.
+ * CP responds with the updated virtchnl2_create_vport structure containing the
+ * necessary fields followed by chunks which in turn will have an array of
+ * num_chunks entries of virtchnl2_queue_reg_chunk structures.
+ *
+ * Associated with VIRTCHNL2_OP_CREATE_VPORT.
+ */
+struct virtchnl2_create_vport {
+ __le16 vport_type;
+ __le16 txq_model;
+ __le16 rxq_model;
+ __le16 num_tx_q;
+ __le16 num_tx_complq;
+ __le16 num_rx_q;
+ __le16 num_rx_bufq;
+ __le16 default_rx_q;
+ __le16 vport_index;
+ /* CP populates the following fields on response */
+ __le16 max_mtu;
+ __le32 vport_id;
+ u8 default_mac_addr[ETH_ALEN];
+ __le16 pad;
+ __le64 rx_desc_ids;
+ __le64 tx_desc_ids;
+ u8 pad1[72];
+ __le32 rss_algorithm;
+ __le16 rss_key_size;
+ __le16 rss_lut_size;
+ __le32 rx_split_pos;
+ u8 pad2[20];
+ struct virtchnl2_queue_reg_chunks chunks;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(160, virtchnl2_create_vport);
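+
+/* Minimal single queue model request sketch (illustrative; the CP fills
+ * max_mtu, vport_id and default_mac_addr on response):
+ *
+ *	struct virtchnl2_create_vport v = {
+ *		.vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT),
+ *		.txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE),
+ *		.rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE),
+ *		.num_tx_q = cpu_to_le16(1),
+ *		.num_rx_q = cpu_to_le16(1),
+ *	};
+ */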
+
+/**
+ * struct virtchnl2_vport - Vport ID info.
+ * @vport_id: Vport id.
+ * @pad: Padding for future extensions.
+ *
+ * PF sends this message to CP to destroy, enable or disable a vport by filling
+ * in the vport_id in virtchnl2_vport structure.
+ * CP responds with the status of the requested operation.
+ *
+ * Associated with VIRTCHNL2_OP_DESTROY_VPORT, VIRTCHNL2_OP_ENABLE_VPORT,
+ * VIRTCHNL2_OP_DISABLE_VPORT.
+ */
+struct virtchnl2_vport {
+ __le32 vport_id;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport);
+
+/**
+ * struct virtchnl2_txq_info - Transmit queue config info
+ * @dma_ring_addr: DMA address.
+ * @type: See enum virtchnl2_queue_type.
+ * @queue_id: Queue ID.
+ * @relative_queue_id: Valid only if queue model is split and type is transmit
+ * queue. Used in many to one mapping of transmit queues to
+ * completion queue.
+ * @model: See enum virtchnl2_queue_model.
+ * @sched_mode: See enum virtchnl2_txq_sched_mode.
+ * @qflags: TX queue feature flags.
+ * @ring_len: Ring length.
+ * @tx_compl_queue_id: Valid only if queue model is split and type is transmit
+ * queue.
+ * @peer_type: Valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MBX_TX.
+ * @peer_rx_queue_id: Valid only if queue type is CONFIG_TX and used to deliver
+ * messages for the respective CONFIG_TX queue.
+ * @pad: Padding.
+ * @egress_pasid: Egress PASID info.
+ * @egress_hdr_pasid: Egress header PASID.
+ * @egress_buf_pasid: Egress buffer PASID.
+ * @pad1: Padding for future extensions.
+ */
+struct virtchnl2_txq_info {
+ __le64 dma_ring_addr;
+ __le32 type;
+ __le32 queue_id;
+ __le16 relative_queue_id;
+ __le16 model;
+ __le16 sched_mode;
+ __le16 qflags;
+ __le16 ring_len;
+ __le16 tx_compl_queue_id;
+ __le16 peer_type;
+ __le16 peer_rx_queue_id;
+ u8 pad[4];
+ __le32 egress_pasid;
+ __le32 egress_hdr_pasid;
+ __le32 egress_buf_pasid;
+ u8 pad1[8];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info);
+
+/**
+ * struct virtchnl2_config_tx_queues - TX queue config.
+ * @vport_id: Vport id.
+ * @num_qinfo: Number of virtchnl2_txq_info structs.
+ * @pad: Padding.
+ * @qinfo: Tx queues config info.
+ *
+ * PF sends this message to set up parameters for one or more transmit queues.
+ * This message contains an array of num_qinfo instances of virtchnl2_txq_info
+ * structures. CP configures requested queues and returns a status code. If
+ * num_qinfo specified is greater than the number of queues associated with the
+ * vport, an error is returned and no queues are configured.
+ *
+ * Associated with VIRTCHNL2_OP_CONFIG_TX_QUEUES.
+ */
+struct virtchnl2_config_tx_queues {
+ __le32 vport_id;
+ __le16 num_qinfo;
+ u8 pad[10];
+ struct virtchnl2_txq_info qinfo[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues);
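+
+/* Because qinfo[] is a flexible array, the message buffer is sized with
+ * struct_size() before allocation (illustrative sketch):
+ *
+ *	struct virtchnl2_config_tx_queues *ctq;
+ *	size_t len = struct_size(ctq, qinfo, num_q);
+ *
+ *	ctq = kzalloc(len, GFP_KERNEL);
+ *	if (!ctq)
+ *		return -ENOMEM;
+ *	ctq->num_qinfo = cpu_to_le16(num_q);
+ */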
+
+/**
+ * struct virtchnl2_rxq_info - Receive queue config info.
+ * @desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
+ * @dma_ring_addr: DMA address.
+ * @type: See enum virtchnl2_queue_type.
+ * @queue_id: Queue id.
+ * @model: See enum virtchnl2_queue_model.
+ * @hdr_buffer_size: Header buffer size.
+ * @data_buffer_size: Data buffer size.
+ * @max_pkt_size: Max packet size.
+ * @ring_len: Ring length.
+ * @buffer_notif_stride: Buffer notification stride in units of 32-descriptors.
+ * This field must be a power of 2.
+ * @pad: Padding.
+ * @dma_head_wb_addr: Applicable only for receive buffer queues.
+ * @qflags: Applicable only for receive completion queues.
+ * See enum virtchnl2_rxq_flags.
+ * @rx_buffer_low_watermark: Rx buffer low watermark.
+ * @rx_bufq1_id: Buffer queue index of the first buffer queue associated with
+ * the Rx queue. Valid only in split queue model.
+ * @rx_bufq2_id: Buffer queue index of the second buffer queue associated with
+ * the Rx queue. Valid only in split queue model.
+ * @bufq2_ena: Indicates if there is a second buffer queue; rx_bufq2_id is
+ * valid only if this field is set.
+ * @pad1: Padding.
+ * @ingress_pasid: Ingress PASID.
+ * @ingress_hdr_pasid: Ingress PASID header.
+ * @ingress_buf_pasid: Ingress PASID buffer.
+ * @pad2: Padding for future extensions.
+ */
+struct virtchnl2_rxq_info {
+ __le64 desc_ids;
+ __le64 dma_ring_addr;
+ __le32 type;
+ __le32 queue_id;
+ __le16 model;
+ __le16 hdr_buffer_size;
+ __le32 data_buffer_size;
+ __le32 max_pkt_size;
+ __le16 ring_len;
+ u8 buffer_notif_stride;
+ u8 pad;
+ __le64 dma_head_wb_addr;
+ __le16 qflags;
+ __le16 rx_buffer_low_watermark;
+ __le16 rx_bufq1_id;
+ __le16 rx_bufq2_id;
+ u8 bufq2_ena;
+ u8 pad1[3];
+ __le32 ingress_pasid;
+ __le32 ingress_hdr_pasid;
+ __le32 ingress_buf_pasid;
+ u8 pad2[16];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);
+
+/**
+ * struct virtchnl2_config_rx_queues - Rx queues config.
+ * @vport_id: Vport id.
+ * @num_qinfo: Number of instances.
+ * @pad: Padding.
+ * @qinfo: Rx queues config info.
+ *
+ * PF sends this message to set up parameters for one or more receive queues.
+ * This message contains an array of num_qinfo instances of virtchnl2_rxq_info
+ * structures. CP configures requested queues and returns a status code.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the vport, an error is returned and no queues are configured.
+ *
+ * Associated with VIRTCHNL2_OP_CONFIG_RX_QUEUES.
+ */
+struct virtchnl2_config_rx_queues {
+ __le32 vport_id;
+ __le16 num_qinfo;
+ u8 pad[18];
+ struct virtchnl2_rxq_info qinfo[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);
+
+/**
+ * struct virtchnl2_add_queues - data for VIRTCHNL2_OP_ADD_QUEUES.
+ * @vport_id: Vport id.
+ * @num_tx_q: Number of Tx queues.
+ * @num_tx_complq: Number of Tx completion queues.
+ * @num_rx_q: Number of Rx queues.
+ * @num_rx_bufq: Number of Rx buffer queues.
+ * @pad: Padding.
+ * @chunks: Chunks of contiguous queues.
+ *
+ * PF sends this message to request additional transmit/receive queues beyond
+ * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues
+ * structure is used to specify the number of each type of queues.
+ * CP responds with the same structure with the actual number of queues assigned
+ * followed by num_chunks of virtchnl2_queue_reg_chunk structures.
+ *
+ * Associated with VIRTCHNL2_OP_ADD_QUEUES.
+ */
+struct virtchnl2_add_queues {
+ __le32 vport_id;
+ __le16 num_tx_q;
+ __le16 num_tx_complq;
+ __le16 num_rx_q;
+ __le16 num_rx_bufq;
+ u8 pad[4];
+ struct virtchnl2_queue_reg_chunks chunks;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_add_queues);
+
+/**
+ * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous
+ * interrupt vectors.
+ * @start_vector_id: Start vector id.
+ * @start_evv_id: Start EVV id.
+ * @num_vectors: Number of vectors.
+ * @pad: Padding.
+ * @dynctl_reg_start: DYN_CTL register offset.
+ * @dynctl_reg_spacing: register spacing between DYN_CTL registers of 2
+ * consecutive vectors.
+ * @itrn_reg_start: ITRN register offset.
+ * @itrn_reg_spacing: Register spacing between itrn registers of 2
+ * consecutive vectors.
+ * @itrn_index_spacing: Register spacing between itrn registers of the same
+ * vector where n=0..2.
+ * @pad1: Padding for future extensions.
+ *
+ * Register offsets and spacing provided by CP.
+ * Dynamic control registers are used for enabling/disabling/re-enabling
+ * interrupts and updating interrupt rates in the hotpath. Any changes
+ * to interrupt rates in the dynamic control registers will be reflected
+ * in the interrupt throttling rate registers.
+ * itrn registers are used to update interrupt rates for specific
+ * interrupt indices without modifying the state of the interrupt.
+ */
+struct virtchnl2_vector_chunk {
+ __le16 start_vector_id;
+ __le16 start_evv_id;
+ __le16 num_vectors;
+ __le16 pad;
+ __le32 dynctl_reg_start;
+ __le32 dynctl_reg_spacing;
+ __le32 itrn_reg_start;
+ __le32 itrn_reg_spacing;
+ __le32 itrn_index_spacing;
+ u8 pad1[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
+
+/**
+ * struct virtchnl2_vector_chunks - chunks of contiguous interrupt vectors.
+ * @num_vchunks: number of vector chunks.
+ * @pad: Padding.
+ * @vchunks: Chunks of contiguous vector info.
+ *
+ * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving
+ * away. CP performs requested action and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_DEALLOC_VECTORS.
+ */
+struct virtchnl2_vector_chunks {
+ __le16 num_vchunks;
+ u8 pad[14];
+ struct virtchnl2_vector_chunk vchunks[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks);
+
+/**
+ * struct virtchnl2_alloc_vectors - vector allocation info.
+ * @num_vectors: Number of vectors.
+ * @pad: Padding.
+ * @vchunks: Chunks of contiguous vector info.
+ *
+ * PF sends this message to request additional interrupt vectors beyond the
+ * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors
+ * structure is used to specify the number of vectors requested. CP responds
+ * with the same structure with the actual number of vectors assigned followed
+ * by virtchnl2_vector_chunks structure identifying the vector ids.
+ *
+ * Associated with VIRTCHNL2_OP_ALLOC_VECTORS.
+ */
+struct virtchnl2_alloc_vectors {
+ __le16 num_vectors;
+ u8 pad[14];
+ struct virtchnl2_vector_chunks vchunks;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_alloc_vectors);
+
+/**
+ * struct virtchnl2_rss_lut - RSS LUT info.
+ * @vport_id: Vport id.
+ * @lut_entries_start: Start of LUT entries.
+ * @lut_entries: Number of LUT entries.
+ * @pad: Padding.
+ * @lut: RSS lookup table.
+ *
+ * PF sends this message to get or set RSS lookup table. Only supported if
+ * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
+ * negotiation.
+ *
+ * Associated with VIRTCHNL2_OP_GET_RSS_LUT and VIRTCHNL2_OP_SET_RSS_LUT.
+ */
+struct virtchnl2_rss_lut {
+ __le32 vport_id;
+ __le16 lut_entries_start;
+ __le16 lut_entries;
+ u8 pad[4];
+ __le32 lut[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut);
+
+/**
+ * struct virtchnl2_rss_hash - RSS hash info.
+ * @ptype_groups: Packet type groups bitmap.
+ * @vport_id: Vport id.
+ * @pad: Padding for future extensions.
+ *
+ * PF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the CP sets these to all possible traffic types that the
+ * hardware supports. The PF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit
+ * during configuration negotiation.
+ *
+ * Associated with VIRTCHNL2_OP_GET_RSS_HASH and VIRTCHNL2_OP_SET_RSS_HASH
+ */
+struct virtchnl2_rss_hash {
+ __le64 ptype_groups;
+ __le32 vport_id;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash);
+
+/**
+ * struct virtchnl2_sriov_vfs_info - VFs info.
+ * @num_vfs: Number of VFs.
+ * @pad: Padding for future extensions.
+ *
+ * This message is used to set the number of SRIOV VFs to be created. The
+ * actual allocation of resources for the VFs in terms of vport, queues and
+ * interrupts is done by CP. When this call completes, the IDPF driver calls
+ * pci_enable_sriov to let the OS instantiate the SRIOV PCIe devices.
+ * Setting the number of VFs to 0 destroys all the VFs of this function.
+ *
+ * Associated with VIRTCHNL2_OP_SET_SRIOV_VFS.
+ */
+struct virtchnl2_sriov_vfs_info {
+ __le16 num_vfs;
+ __le16 pad;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
+
+/**
+ * struct virtchnl2_ptype - Packet type info.
+ * @ptype_id_10: 10-bit packet type.
+ * @ptype_id_8: 8-bit packet type.
+ * @proto_id_count: Number of protocol ids the packet supports, maximum of 32
+ * protocol ids are supported.
+ * @pad: Padding.
+ * @proto_id: Array of protocol ids, with proto_id_count deciding its
+ * allocation. See enum virtchnl2_proto_hdr_type.
+ *
+ * Based on the descriptor type the PF supports, CP fills ptype_id_10 or
+ * ptype_id_8 for flex and base descriptor respectively. If the ptype_id_10
+ * value is set to 0xFFFF, the PF should consider this ptype as a dummy one;
+ * it is the last ptype.
+ */
+struct virtchnl2_ptype {
+ __le16 ptype_id_10;
+ u8 ptype_id_8;
+ u8 proto_id_count;
+ __le16 pad;
+ __le16 proto_id[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
+
+/**
+ * struct virtchnl2_get_ptype_info - Packet type info.
+ * @start_ptype_id: Starting ptype ID.
+ * @num_ptypes: Number of packet types from start_ptype_id.
+ * @pad: Padding for future extensions.
+ *
+ * The total number of supported packet types is based on the descriptor type.
+ * For the flex descriptor, it is 1024 (10-bit ptype), and for the base
+ * descriptor, it is 256 (8-bit ptype). Send this message to the CP by
+ * populating the 'start_ptype_id' and the 'num_ptypes'. CP responds with the
+ * 'start_ptype_id', 'num_ptypes', and the array of ptypes (virtchnl2_ptype)
+ * that are added at the end of the 'virtchnl2_get_ptype_info' message (Note:
+ * There is no specific field for the ptypes; they are added at the end of the
+ * ptype info message and the PF/VF is expected to extract them accordingly.
+ * The reason for this is that the compiler doesn't allow nested flexible
+ * array fields).
+ *
+ * If all the ptypes don't fit into one mailbox buffer, CP splits the
+ * ptype info into multiple messages, where each message will have its own
+ * 'start_ptype_id', 'num_ptypes', and the ptype array itself. When CP is done
+ * updating all the ptype information extracted from the package (the number of
+ * ptypes extracted might be less than what PF/VF expects), it will append a
+ * dummy ptype (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF)
+ * to the ptype array.
+ *
+ * PF/VF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO messages.
+ *
+ * Associated with VIRTCHNL2_OP_GET_PTYPE_INFO.
+ */
+struct virtchnl2_get_ptype_info {
+ __le16 start_ptype_id;
+ __le16 num_ptypes;
+ __le32 pad;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_ptype_info);
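+
+/* Parsing sketch (illustrative): the receiver walks the ptypes appended
+ * after this header and stops at the 0xFFFF dummy entry:
+ *
+ *	if (le16_to_cpu(ptype->ptype_id_10) == 0xFFFF)
+ *		return 0;
+ */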
+
+/**
+ * struct virtchnl2_vport_stats - Vport statistics.
+ * @vport_id: Vport id.
+ * @pad: Padding.
+ * @rx_bytes: Received bytes.
+ * @rx_unicast: Received unicast packets.
+ * @rx_multicast: Received multicast packets.
+ * @rx_broadcast: Received broadcast packets.
+ * @rx_discards: Discarded packets on receive.
+ * @rx_errors: Receive errors.
+ * @rx_unknown_protocol: Unknown protocol.
+ * @tx_bytes: Transmitted bytes.
+ * @tx_unicast: Transmitted unicast packets.
+ * @tx_multicast: Transmitted multicast packets.
+ * @tx_broadcast: Transmitted broadcast packets.
+ * @tx_discards: Discarded packets on transmit.
+ * @tx_errors: Transmit errors.
+ * @rx_invalid_frame_length: Packets with invalid frame length.
+ * @rx_overflow_drop: Packets dropped on buffer overflow.
+ *
+ * PF/VF sends this message to CP to get the updated stats by specifying the
+ * vport_id. CP responds with stats in struct virtchnl2_vport_stats.
+ *
+ * Associated with VIRTCHNL2_OP_GET_STATS.
+ */
+struct virtchnl2_vport_stats {
+ __le32 vport_id;
+ u8 pad[4];
+ __le64 rx_bytes;
+ __le64 rx_unicast;
+ __le64 rx_multicast;
+ __le64 rx_broadcast;
+ __le64 rx_discards;
+ __le64 rx_errors;
+ __le64 rx_unknown_protocol;
+ __le64 tx_bytes;
+ __le64 tx_unicast;
+ __le64 tx_multicast;
+ __le64 tx_broadcast;
+ __le64 tx_discards;
+ __le64 tx_errors;
+ __le64 rx_invalid_frame_length;
+ __le64 rx_overflow_drop;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
+
+/**
+ * struct virtchnl2_event - Event info.
+ * @event: Event opcode. See enum virtchnl2_event_codes.
+ * @link_speed: Link_speed provided in Mbps.
+ * @vport_id: Vport ID.
+ * @link_status: Link status.
+ * @pad: Padding.
+ * @reserved: Reserved.
+ *
+ * CP sends this message to inform the PF/VF driver of events that may affect
+ * it. No direct response is expected from the driver, though it may generate
+ * other messages in response to this one.
+ *
+ * Associated with VIRTCHNL2_OP_EVENT.
+ */
+struct virtchnl2_event {
+ __le32 event;
+ __le32 link_speed;
+ __le32 vport_id;
+ u8 link_status;
+ u8 pad;
+ __le16 reserved;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);
+
+/**
+ * struct virtchnl2_rss_key - RSS key info.
+ * @vport_id: Vport id.
+ * @key_len: Length of RSS key.
+ * @pad: Padding.
+ * @key_flex: RSS hash key, packed bytes.
+ *
+ * PF/VF sends this message to get or set RSS key. Only supported if both
+ * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
+ * negotiation.
+ *
+ * Associated with VIRTCHNL2_OP_GET_RSS_KEY and VIRTCHNL2_OP_SET_RSS_KEY.
+ */
+struct virtchnl2_rss_key {
+ __le32 vport_id;
+ __le16 key_len;
+ u8 pad;
+ __DECLARE_FLEX_ARRAY(u8, key_flex);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key);
+
+/**
+ * struct virtchnl2_queue_chunk - chunk of contiguous queues
+ * @type: See enum virtchnl2_queue_type.
+ * @start_queue_id: Starting queue id.
+ * @num_queues: Number of queues.
+ * @pad: Padding for future extensions.
+ */
+struct virtchnl2_queue_chunk {
+ __le32 type;
+ __le32 start_queue_id;
+ __le32 num_queues;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
+
+/**
+ * struct virtchnl2_queue_chunks - chunks of contiguous queues
+ * @num_chunks: Number of chunks.
+ * @pad: Padding.
+ * @chunks: Chunks of contiguous queues info.
+ */
+struct virtchnl2_queue_chunks {
+ __le16 num_chunks;
+ u8 pad[6];
+ struct virtchnl2_queue_chunk chunks[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);
+
+/**
+ * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info.
+ * @vport_id: Vport id.
+ * @pad: Padding.
+ * @chunks: Chunks of contiguous queues info.
+ *
+ * PF sends these messages to enable, disable or delete queues specified in
+ * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues
+ * to be enabled/disabled/deleted. Also applicable to single queue receive or
+ * transmit. CP performs requested action and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_ENABLE_QUEUES, VIRTCHNL2_OP_DISABLE_QUEUES and
+ * VIRTCHNL2_OP_DEL_QUEUES.
+ */
+struct virtchnl2_del_ena_dis_queues {
+ __le32 vport_id;
+ u8 pad[4];
+ struct virtchnl2_queue_chunks chunks;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_del_ena_dis_queues);
+
+/**
+ * struct virtchnl2_queue_vector - Queue to vector mapping.
+ * @queue_id: Queue id.
+ * @vector_id: Vector id.
+ * @pad: Padding.
+ * @itr_idx: See enum virtchnl2_itr_idx.
+ * @queue_type: See enum virtchnl2_queue_type.
+ * @pad1: Padding for future extensions.
+ */
+struct virtchnl2_queue_vector {
+ __le32 queue_id;
+ __le16 vector_id;
+ u8 pad[2];
+ __le32 itr_idx;
+ __le32 queue_type;
+ u8 pad1[8];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);
+
+/**
+ * struct virtchnl2_queue_vector_maps - Map/unmap queues info.
+ * @vport_id: Vport id.
+ * @num_qv_maps: Number of queue vector maps.
+ * @pad: Padding.
+ * @qv_maps: Queue to vector maps.
+ *
+ * PF sends this message to map or unmap queues to vectors and interrupt
+ * throttling rate index registers. External data buffer contains
+ * virtchnl2_queue_vector_maps structure that contains num_qv_maps of
+ * virtchnl2_queue_vector structures. CP maps the requested queue vector maps
+ * after validating the queue and vector ids and returns a status code.
+ *
+ * Associated with VIRTCHNL2_OP_MAP_QUEUE_VECTOR and
+ * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR.
+ */
+struct virtchnl2_queue_vector_maps {
+ __le32 vport_id;
+ __le16 num_qv_maps;
+ u8 pad[10];
+ struct virtchnl2_queue_vector qv_maps[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
+
+/**
+ * struct virtchnl2_loopback - Loopback info.
+ * @vport_id: Vport id.
+ * @enable: Enable/disable.
+ * @pad: Padding for future extensions.
+ *
+ * PF/VF sends this message to transition to/from the loopback state. Setting
+ * 'enable' to 1 enables the loopback state and setting 'enable' to 0
+ * disables it. CP configures the loopback state and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_LOOPBACK.
+ */
+struct virtchnl2_loopback {
+ __le32 vport_id;
+ u8 enable;
+ u8 pad[3];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback);
+
+/**
+ * struct virtchnl2_mac_addr - MAC address info.
+ * @addr: MAC address.
+ * @type: MAC type. See enum virtchnl2_mac_addr_type.
+ * @pad: Padding for future extensions.
+ */
+struct virtchnl2_mac_addr {
+ u8 addr[ETH_ALEN];
+ u8 type;
+ u8 pad;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr);
+
+/**
+ * struct virtchnl2_mac_addr_list - List of MAC addresses.
+ * @vport_id: Vport id.
+ * @num_mac_addr: Number of MAC addresses.
+ * @pad: Padding.
+ * @mac_addr_list: List with MAC address info.
+ *
+ * PF/VF driver uses this structure to send a list of MAC addresses to be
+ * added/deleted to the CP, whereas the CP performs the action and returns
+ * the status.
+ *
+ * Associated with VIRTCHNL2_OP_ADD_MAC_ADDR and VIRTCHNL2_OP_DEL_MAC_ADDR.
+ */
+struct virtchnl2_mac_addr_list {
+ __le32 vport_id;
+ __le16 num_mac_addr;
+ u8 pad[2];
+ struct virtchnl2_mac_addr mac_addr_list[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list);
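+
+/* Message-building sketch (illustrative), again sizing the trailing
+ * flexible array with struct_size():
+ *
+ *	struct virtchnl2_mac_addr_list *ma;
+ *	size_t len = struct_size(ma, mac_addr_list, num_entries);
+ *
+ *	ma = kzalloc(len, GFP_KERNEL);
+ *	if (!ma)
+ *		return -ENOMEM;
+ *	ma->vport_id = cpu_to_le32(vport_id);
+ *	ma->num_mac_addr = cpu_to_le16(num_entries);
+ */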
+
+/**
+ * struct virtchnl2_promisc_info - Promisc type info.
+ * @vport_id: Vport id.
+ * @flags: See enum virtchnl2_promisc_flags.
+ * @pad: Padding for future extensions.
+ *
+ * PF/VF sends the vport id and flags to the CP, whereas the CP performs the
+ * action and returns the status.
+ *
+ * Associated with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE.
+ */
+struct virtchnl2_promisc_info {
+ __le32 vport_id;
+ __le16 flags;
+ u8 pad[2];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
+
+#endif /* _VIRTCHNL2_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h b/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h
new file mode 100644
index 000000000000..f1b577f1c452
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _VIRTCHNL2_LAN_DESC_H_
+#define _VIRTCHNL2_LAN_DESC_H_
+
+#include <linux/bits.h>
+
+/* This is an interface definition file where existing enums and their values
+ * must remain unchanged over time, so we specify explicit values for all enums.
+ */
+
+/* Transmit descriptor ID flags
+ */
+enum virtchnl2_tx_desc_ids {
+ VIRTCHNL2_TXDID_DATA = BIT(0),
+ VIRTCHNL2_TXDID_CTX = BIT(1),
+ /* TXDID bit 2 is reserved
+ * TXDID bit 3 is free for future use
+ * TXDID bit 4 is reserved
+ */
+ VIRTCHNL2_TXDID_FLEX_TSO_CTX = BIT(5),
+ /* TXDID bit 6 is reserved */
+ VIRTCHNL2_TXDID_FLEX_L2TAG1_L2TAG2 = BIT(7),
+ /* TXDID bits 8 and 9 are free for future use
+ * TXDID bit 10 is reserved
+ * TXDID bit 11 is free for future use
+ */
+ VIRTCHNL2_TXDID_FLEX_FLOW_SCHED = BIT(12),
+ /* TXDID bits 13 and 14 are free for future use */
+ VIRTCHNL2_TXDID_DESC_DONE = BIT(15),
+};
+
+/* Receive descriptor IDs */
+enum virtchnl2_rx_desc_ids {
+ VIRTCHNL2_RXDID_1_32B_BASE = 1,
+ /* FLEX_SQ_NIC and FLEX_SPLITQ share desc ids because they can be
+ * differentiated based on queue model; e.g. single queue model can
+ * only use FLEX_SQ_NIC and split queue model can only use FLEX_SPLITQ
+ * for DID 2.
+ */
+ VIRTCHNL2_RXDID_2_FLEX_SPLITQ = 2,
+ VIRTCHNL2_RXDID_2_FLEX_SQ_NIC = VIRTCHNL2_RXDID_2_FLEX_SPLITQ,
+ /* 3 through 6 are reserved */
+ VIRTCHNL2_RXDID_7_HW_RSVD = 7,
+ /* 8 through 15 are free */
+};
+
+/* Receive descriptor ID bitmasks */
+#define VIRTCHNL2_RXDID_M(bit) BIT_ULL(VIRTCHNL2_RXDID_##bit)
+
+enum virtchnl2_rx_desc_id_bitmasks {
+ VIRTCHNL2_RXDID_1_32B_BASE_M = VIRTCHNL2_RXDID_M(1_32B_BASE),
+ VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M = VIRTCHNL2_RXDID_M(2_FLEX_SPLITQ),
+ VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL2_RXDID_M(2_FLEX_SQ_NIC),
+ VIRTCHNL2_RXDID_7_HW_RSVD_M = VIRTCHNL2_RXDID_M(7_HW_RSVD),
+};
+
+/* For splitq virtchnl2_rx_flex_desc_adv_nic_3 desc members */
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M GENMASK(3, 0)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_M GENMASK(7, 6)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M GENMASK(9, 0)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_S 12
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_M GENMASK(15, 13)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M GENMASK(13, 0)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S 14
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S 15
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M GENMASK(9, 0)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S 10
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S 11
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S 12
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M GENMASK(14, 12)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S 15
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S)
+
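+/* Extraction sketch (illustrative) using FIELD_GET() from
+ * <linux/bitfield.h> on the converted descriptor word:
+ *
+ *	u16 qw = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
+ *	u32 len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M, qw);
+ *	bool gen = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, qw);
+ */
+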
+/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */
+enum virtchnl2_rx_flex_desc_adv_status_error_0_qw1_bits {
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_DD_M = BIT(0),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M = BIT(1),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M = BIT(2),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M = BIT(3),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M = BIT(4),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M = BIT(5),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M = BIT(6),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_M = BIT(7),
+};
+
+/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */
+enum virtchnl2_rx_flex_desc_adv_status_error_0_qw0_bits {
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LPBK_M = BIT(0),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M = BIT(1),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RXE_M = BIT(2),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_CRCP_M = BIT(3),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_M = BIT(4),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_M = BIT(5),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_M = BIT(6),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_M = BIT(7),
+};
+
+/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */
+enum virtchnl2_rx_flex_desc_adv_status_error_1_bits {
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_RSVD_M = GENMASK(1, 0),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_M = BIT(2),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_M = BIT(3),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_M = BIT(4),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_M = BIT(5),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_M = BIT(6),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_M = BIT(7),
+};
+
+/* For singleq (flex) virtchnl2_rx_flex_desc fields
+ * For virtchnl2_rx_flex_desc.ptype_flex_flags0 member
+ */
+#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_M GENMASK(9, 0)
+
+/* For virtchnl2_rx_flex_desc.pkt_len member */
+#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M GENMASK(13, 0)
+
+/* Bitmasks for singleq (flex) virtchnl2_rx_flex_desc */
+enum virtchnl2_rx_flex_desc_status_error_0_bits {
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_M = BIT(0),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_EOF_M = BIT(1),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_HBO_M = BIT(2),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_M = BIT(3),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_M = BIT(4),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_M = BIT(5),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_M = BIT(6),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_M = BIT(7),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_LPBK_M = BIT(8),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_M = BIT(9),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_RXE_M = BIT(10),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_CRCP_M = BIT(11),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M = BIT(12),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_L2TAG1P_M = BIT(13),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_M = BIT(14),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_M = BIT(15),
+};
+
+/* Bitmasks for singleq (flex) virtchnl2_rx_flex_desc */
+enum virtchnl2_rx_flex_desc_status_error_1_bits {
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_CPM_M = GENMASK(3, 0),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M = BIT(4),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_CRYPTO_M = BIT(5),
+ /* [10:6] reserved */
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_L2TAG2P_M = BIT(11),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_M = BIT(12),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_M = BIT(13),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_M = BIT(14),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_M = BIT(15),
+};
+
+/* For virtchnl2_rx_flex_desc.ts_low member */
+#define VIRTCHNL2_RX_FLEX_TSTAMP_VALID BIT(0)
+
+/* For singleq (non flex) virtchnl2_singleq_base_rx_desc legacy desc members */
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M GENMASK_ULL(51, 38)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M GENMASK_ULL(37, 30)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M GENMASK_ULL(26, 19)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M GENMASK_ULL(18, 0)
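+
+/* Base descriptor sketch (illustrative): status, error, ptype and length
+ * all live in one 64-bit word, so a single load feeds several FIELD_GET()s:
+ *
+ *	u64 qw1 = le64_to_cpu(desc->qword1.status_error_ptype_len);
+ *	u32 len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qw1);
+ *	u32 status = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M, qw1);
+ */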
+
+/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */
+enum virtchnl2_rx_base_desc_status_bits {
+ VIRTCHNL2_RX_BASE_DESC_STATUS_DD_M = BIT(0),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M = BIT(1),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_L2TAG1P_M = BIT(2),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_M = BIT(3),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_CRCP_M = BIT(4),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD_M = GENMASK(7, 5),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_EXT_UDP_0_M = BIT(8),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_UMBCAST_M = GENMASK(10, 9),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_FLM_M = BIT(11),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_FLTSTAT_M = GENMASK(13, 12),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_LPBK_M = BIT(14),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M = BIT(15),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD1_M = GENMASK(17, 16),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_INT_UDP_0_M = BIT(18),
+};
+
+/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */
+enum virtchnl2_rx_base_desc_error_bits {
+ VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M = BIT(0),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_ATRAEFAIL_M = BIT(1),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_HBO_M = BIT(2),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_L3L4E_M = GENMASK(5, 3),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_M = BIT(3),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_M = BIT(4),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_M = BIT(5),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_OVERSIZE_M = BIT(6),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_M = BIT(7),
+};
+
+/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */
+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M GENMASK(13, 12)
+
+/**
+ * struct virtchnl2_splitq_rx_buf_desc - SplitQ RX buffer descriptor format
+ * @qword0: RX buffer struct.
+ * @qword0.buf_id: Buffer identifier.
+ * @qword0.rsvd0: Reserved.
+ * @qword0.rsvd1: Reserved.
+ * @pkt_addr: Packet buffer address.
+ * @hdr_addr: Header buffer address.
+ * @rsvd2: Reserved.
+ *
+ * Receive Descriptors
+ * SplitQ buffer
+ * | 16| 0|
+ * ----------------------------------------------------------------
+ * | RSV | Buffer ID |
+ * ----------------------------------------------------------------
+ * | Rx packet buffer address |
+ * ----------------------------------------------------------------
+ * | Rx header buffer address |
+ * ----------------------------------------------------------------
+ * | RSV |
+ * ----------------------------------------------------------------
+ * | 0|
+ */
+struct virtchnl2_splitq_rx_buf_desc {
+ struct {
+ __le16 buf_id;
+ __le16 rsvd0;
+ __le32 rsvd1;
+ } qword0;
+ __le64 pkt_addr;
+ __le64 hdr_addr;
+ __le64 rsvd2;
+};
+
+/**
+ * struct virtchnl2_singleq_rx_buf_desc - SingleQ RX buffer descriptor format.
+ * @pkt_addr: Packet buffer address.
+ * @hdr_addr: Header buffer address.
+ * @rsvd1: Reserved.
+ * @rsvd2: Reserved.
+ *
+ * SingleQ buffer
+ * | 0|
+ * ----------------------------------------------------------------
+ * | Rx packet buffer address |
+ * ----------------------------------------------------------------
+ * | Rx header buffer address |
+ * ----------------------------------------------------------------
+ * | RSV |
+ * ----------------------------------------------------------------
+ * | RSV |
+ * ----------------------------------------------------------------
+ * | 0|
+ */
+struct virtchnl2_singleq_rx_buf_desc {
+ __le64 pkt_addr;
+ __le64 hdr_addr;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+/**
+ * struct virtchnl2_singleq_base_rx_desc - RX descriptor writeback format.
+ * @qword0: First quad word struct.
+ * @qword0.lo_dword: Lower dual word struct.
+ * @qword0.lo_dword.mirroring_status: Mirrored packet status.
+ * @qword0.lo_dword.l2tag1: Stripped L2 tag from the received packet.
+ * @qword0.hi_dword: High dual word union.
+ * @qword0.hi_dword.rss: RSS hash.
+ * @qword0.hi_dword.fd_id: Flow director filter id.
+ * @qword1: Second quad word struct.
+ * @qword1.status_error_ptype_len: Status/error/PTYPE/length.
+ * @qword2: Third quad word struct.
+ * @qword2.ext_status: Extended status.
+ * @qword2.rsvd: Reserved.
+ * @qword2.l2tag2_1: Extracted L2 tag 2 from the packet.
+ * @qword2.l2tag2_2: Reserved.
+ * @qword3: Fourth quad word struct.
+ * @qword3.reserved: Reserved.
+ * @qword3.fd_id: Flow director filter id.
+ *
+ * Profile ID 0x1, SingleQ, base writeback format
+ */
+struct virtchnl2_singleq_base_rx_desc {
+ struct {
+ struct {
+ __le16 mirroring_status;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss;
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ __le64 status_error_ptype_len;
+ } qword1;
+ struct {
+ __le16 ext_status;
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ __le32 reserved;
+ __le32 fd_id;
+ } qword3;
+};
+
+/**
+ * struct virtchnl2_rx_flex_desc_nic - RX descriptor writeback format.
+ *
+ * @rxdid: Descriptor builder profile id.
+ * @mir_id_umb_cast: umb_cast=[7:6], mirror=[5:0]
+ * @ptype_flex_flags0: ff0=[15:10], ptype=[9:0]
+ * @pkt_len: Packet length, [15:14] are reserved.
+ * @hdr_len_sph_flex_flags1: ff1/ext=[15:12], sph=[11], header=[10:0].
+ * @status_error0: Status/Error section 0.
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @rss_hash: RSS hash.
+ * @status_error1: Status/Error section 1.
+ * @flexi_flags2: Flexible flags section 2.
+ * @ts_low: Lower word of timestamp value.
+ * @l2tag2_1st: First L2TAG2.
+ * @l2tag2_2nd: Second L2TAG2.
+ * @flow_id: Flow id.
+ * @flex_ts: Timestamp and flexible flow id union.
+ * @flex_ts.ts_high: Timestamp higher word of the timestamp value.
+ * @flex_ts.flex.rsvd: Reserved.
+ * @flex_ts.flex.flow_id_ipv6: IPv6 flow id.
+ *
+ * Profile ID 0x2, SingleQ, flex writeback format
+ */
+struct virtchnl2_rx_flex_desc_nic {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flex_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+ /* Qword 3 */
+ __le32 flow_id;
+ union {
+ struct {
+ __le16 rsvd;
+ __le16 flow_id_ipv6;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
+
+/**
+ * struct virtchnl2_rx_flex_desc_adv_nic_3 - RX descriptor writeback format.
+ * @rxdid_ucast: ucast=[7:6], rsvd=[5:4], profile_id=[3:0].
+ * @status_err0_qw0: Status/Error section 0 in quad word 0.
+ * @ptype_err_fflags0: ff0=[15:12], udp_len_err=[11], ip_hdr_err=[10],
+ * ptype=[9:0].
+ * @pktlen_gen_bufq_id: bufq_id=[15] only in splitq, gen=[14] only in splitq,
+ * plen=[13:0].
+ * @hdrlen_flags: miss_prepend=[15], trunc_mirr=[14], int_udp_0=[13],
+ * ext_udp0=[12], sph=[11] only in splitq, rsc=[10]
+ * only in splitq, header=[9:0].
+ * @status_err0_qw1: Status/Error section 0 in quad word 1.
+ * @status_err1: Status/Error section 1.
+ * @fflags1: Flexible flags section 1.
+ * @ts_low: Lower word of timestamp value.
+ * @buf_id: Buffer identifier. Only in splitq mode.
+ * @misc: Union.
+ * @misc.raw_cs: Raw checksum.
+ * @misc.l2tag1: Stripped L2 tag from the received packet.
+ * @misc.rscseglen: RSC segment length.
+ * @hash1: Lower bits of Rx hash value.
+ * @ff2_mirrid_hash2: Union.
+ * @ff2_mirrid_hash2.fflags2: Flexible flags section 2.
+ * @ff2_mirrid_hash2.mirrorid: Mirror id.
+ * @ff2_mirrid_hash2.rscseglen: RSC segment length.
+ * @hash3: Upper bits of Rx hash value.
+ * @l2tag2: Extracted L2 tag 2 from the packet.
+ * @fmd4: Flexible metadata container 4.
+ * @l2tag1: Stripped L2 tag from the received packet.
+ * @fmd6: Flexible metadata container 6.
+ * @ts_high: Timestamp higher word of the timestamp value.
+ *
+ * Profile ID 0x2, SplitQ, flex writeback format
+ *
+ * Flex-field 0: BufferID
+ * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)
+ * Flex-field 2: Hash[15:0]
+ * Flex-flags 2: Hash[23:16]
+ * Flex-field 3: L2TAG2
+ * Flex-field 5: L2TAG1
+ * Flex-field 7: Timestamp (upper 32 bits)
+ */
+struct virtchnl2_rx_flex_desc_adv_nic_3 {
+ /* Qword 0 */
+ u8 rxdid_ucast;
+ u8 status_err0_qw0;
+ __le16 ptype_err_fflags0;
+ __le16 pktlen_gen_bufq_id;
+ __le16 hdrlen_flags;
+ /* Qword 1 */
+ u8 status_err0_qw1;
+ u8 status_err1;
+ u8 fflags1;
+ u8 ts_low;
+ __le16 buf_id;
+ union {
+ __le16 raw_cs;
+ __le16 l2tag1;
+ __le16 rscseglen;
+ } misc;
+ /* Qword 2 */
+ __le16 hash1;
+ union {
+ u8 fflags2;
+ u8 mirrorid;
+ u8 hash2;
+ } ff2_mirrid_hash2;
+ u8 hash3;
+ __le16 l2tag2;
+ __le16 fmd4;
+ /* Qword 3 */
+ __le16 l2tag1;
+ __le16 fmd6;
+ __le32 ts_high;
+};
+
+/* Common union for accessing descriptor format structs */
+union virtchnl2_rx_desc {
+ struct virtchnl2_singleq_base_rx_desc base_wb;
+ struct virtchnl2_rx_flex_desc_nic flex_nic_wb;
+ struct virtchnl2_rx_flex_desc_adv_nic_3 flex_adv_nic_3_wb;
+};
+
+#endif /* _VIRTCHNL_LAN_DESC_H_ */
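A hedged sketch of how the union is consumed: read through the member matching the negotiated profile, byte-swap, and mask reserved bits (here pkt_len keeps [13:0], per the field docs above). The function name is illustrative only:

	/* Sketch: pull length and RSS hash from a flex NIC writeback. */
	static void example_parse_flex_nic(const union virtchnl2_rx_desc *rxd,
					   u16 *len, u32 *hash)
	{
		const struct virtchnl2_rx_flex_desc_nic *wb = &rxd->flex_nic_wb;

		*len = le16_to_cpu(wb->pkt_len) & GENMASK(13, 0); /* [15:14] rsvd */
		*hash = le32_to_cpu(wb->rss_hash);
	}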
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 76b34cee1da3..2ac9dffd0bf8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7857,7 +7857,6 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
{
struct pci_dev *pdev = adapter->pdev;
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
- struct list_head *pos;
struct vf_mac_filter *entry = NULL;
int ret = 0;
@@ -7878,8 +7877,7 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
switch (info) {
case E1000_VF_MAC_FILTER_CLR:
/* remove all unicast MAC filters related to the current VF */
- list_for_each(pos, &adapter->vf_macs.l) {
- entry = list_entry(pos, struct vf_mac_filter, l);
+ list_for_each_entry(entry, &adapter->vf_macs.l, l) {
if (entry->vf == vf) {
entry->vf = -1;
entry->free = true;
@@ -7889,8 +7887,7 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
break;
case E1000_VF_MAC_FILTER_ADD:
/* try to find empty slot in the list */
- list_for_each(pos, &adapter->vf_macs.l) {
- entry = list_entry(pos, struct vf_mac_filter, l);
+ list_for_each_entry(entry, &adapter->vf_macs.l, l) {
if (entry->free)
break;
}
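This hunk and the ixgbe ones below are the same mechanical conversion: list_for_each() plus a manual list_entry() collapses into list_for_each_entry(). A minimal sketch of the resulting shape, reusing the head and member names from the hunk:

	/* Sketch: find the first free filter on the VF MAC list. */
	static struct vf_mac_filter *example_find_free(struct list_head *head)
	{
		struct vf_mac_filter *entry;

		list_for_each_entry(entry, head, l)
			if (entry->free)
				return entry;
		return NULL;
	}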
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index dd03b017dfc5..94bde2cad0f4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2421,7 +2421,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
if (xdp_xmit & IXGBE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
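xdp_do_flush_map() is the older name for xdp_do_flush(); this hunk and the identical ones in the ixgbe_xsk, mvneta, mvpp2, and mtk_eth_soc sections below are pure renames. The call-site pattern is unchanged, flushing once per NAPI poll after the rx loop:

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush();	/* flush maps touched by bpf_redirect_map() */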
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 29cc60988071..4c6e2a485d8e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -639,12 +639,10 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
struct vf_macvlans *entry;
- struct list_head *pos;
int retval = 0;
if (index <= 1) {
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
+ list_for_each_entry(entry, &adapter->vf_mvs.l, l) {
if (entry->vf == vf) {
entry->vf = -1;
entry->free = true;
@@ -664,8 +662,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry = NULL;
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
+ list_for_each_entry(entry, &adapter->vf_mvs.l, l) {
if (entry->free)
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 1703c640a434..59798bc33298 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -351,7 +351,7 @@ construct_skb:
}
if (xdp_xmit & IXGBE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 5f6ae11212ae..81cf3361a1e5 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1380,13 +1380,11 @@ static int korina_probe(struct platform_device *pdev)
return rc;
}
-static int korina_remove(struct platform_device *pdev)
+static void korina_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -1405,7 +1403,7 @@ static struct platform_driver korina_driver = {
.of_match_table = of_match_ptr(korina_match),
},
.probe = korina_probe,
- .remove = korina_remove,
+ .remove_new = korina_remove,
};
module_platform_driver(korina_driver);
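Every platform driver touched below gets the same treatment: the remove callback returns void (its int return was ignored by the core anyway) and is registered via .remove_new. A minimal sketch with a hypothetical driver:

	static int example_probe(struct platform_device *pdev)
	{
		return 0;	/* setup elided */
	}

	static void example_remove(struct platform_device *pdev)
	{
		struct net_device *ndev = platform_get_drvdata(pdev);

		unregister_netdev(ndev);	/* no return value to get wrong */
	}

	static struct platform_driver example_driver = {
		.probe		= example_probe,
		.remove_new	= example_remove,
		.driver		= { .name = "example" },
	};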
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index f5961bdcc480..1d5b7bb6380f 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -721,8 +721,7 @@ err_out:
return err;
}
-static int
-ltq_etop_remove(struct platform_device *pdev)
+static void ltq_etop_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -732,11 +731,10 @@ ltq_etop_remove(struct platform_device *pdev)
ltq_etop_mdio_cleanup(dev);
unregister_netdev(dev);
}
- return 0;
}
static struct platform_driver ltq_mii_driver = {
- .remove = ltq_etop_remove,
+ .remove_new = ltq_etop_remove,
.driver = {
.name = "ltq_etop",
},
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 8d646c7f8c82..8bd4def3622e 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -641,7 +641,7 @@ err_uninit_dma:
return err;
}
-static int xrx200_remove(struct platform_device *pdev)
+static void xrx200_remove(struct platform_device *pdev)
{
struct xrx200_priv *priv = platform_get_drvdata(pdev);
struct net_device *net_dev = priv->net_dev;
@@ -659,8 +659,6 @@ static int xrx200_remove(struct platform_device *pdev)
/* shut down hardware */
xrx200_hw_cleanup(priv);
-
- return 0;
}
static const struct of_device_id xrx200_match[] = {
@@ -671,7 +669,7 @@ MODULE_DEVICE_TABLE(of, xrx200_match);
static struct platform_driver xrx200_driver = {
.probe = xrx200_probe,
- .remove = xrx200_remove,
+ .remove_new = xrx200_remove,
.driver = {
.name = "lantiq,xrx200-net",
.of_match_table = xrx200_match,
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index ffa96059079c..5182fe737c37 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -294,13 +294,11 @@ static int liteeth_probe(struct platform_device *pdev)
return 0;
}
-static int liteeth_remove(struct platform_device *pdev)
+static void liteeth_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
unregister_netdev(netdev);
-
- return 0;
}
static const struct of_device_id liteeth_of_match[] = {
@@ -311,7 +309,7 @@ MODULE_DEVICE_TABLE(of, liteeth_of_match);
static struct platform_driver liteeth_driver = {
.probe = liteeth_probe,
- .remove = liteeth_remove,
+ .remove_new = liteeth_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = liteeth_of_match,
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 3b129a1c3381..f0bdc06d253d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2892,19 +2892,18 @@ err_put_clk:
return ret;
}
-static int mv643xx_eth_shared_remove(struct platform_device *pdev)
+static void mv643xx_eth_shared_remove(struct platform_device *pdev)
{
struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
mv643xx_eth_shared_of_remove();
if (!IS_ERR(msp->clk))
clk_disable_unprepare(msp->clk);
- return 0;
}
static struct platform_driver mv643xx_eth_shared_driver = {
.probe = mv643xx_eth_shared_probe,
- .remove = mv643xx_eth_shared_remove,
+ .remove_new = mv643xx_eth_shared_remove,
.driver = {
.name = MV643XX_ETH_SHARED_NAME,
.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
@@ -3279,7 +3278,7 @@ out:
return err;
}
-static int mv643xx_eth_remove(struct platform_device *pdev)
+static void mv643xx_eth_remove(struct platform_device *pdev)
{
struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
struct net_device *dev = mp->dev;
@@ -3293,8 +3292,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
clk_disable_unprepare(mp->clk);
free_netdev(mp->dev);
-
- return 0;
}
static void mv643xx_eth_shutdown(struct platform_device *pdev)
@@ -3311,7 +3308,7 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
static struct platform_driver mv643xx_eth_driver = {
.probe = mv643xx_eth_probe,
- .remove = mv643xx_eth_remove,
+ .remove_new = mv643xx_eth_remove,
.shutdown = mv643xx_eth_shutdown,
.driver = {
.name = MV643XX_ETH_NAME,
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 674913184ebf..89f26402f8fb 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -388,7 +388,7 @@ out_clk:
return ret;
}
-static int orion_mdio_remove(struct platform_device *pdev)
+static void orion_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
struct orion_mdio_dev *dev = bus->priv;
@@ -404,8 +404,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
clk_disable_unprepare(dev->clk[i]);
clk_put(dev->clk[i]);
}
-
- return 0;
}
static const struct of_device_id orion_mdio_match[] = {
@@ -426,7 +424,7 @@ MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
static struct platform_driver orion_mdio_driver = {
.probe = orion_mdio_probe,
- .remove = orion_mdio_remove,
+ .remove_new = orion_mdio_remove,
.driver = {
.name = "orion-mdio",
.of_match_table = orion_mdio_match,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d483b8c00ec0..90817136808d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2520,7 +2520,7 @@ next:
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
if (ps.xdp_redirect)
- xdp_do_flush_map();
+ xdp_do_flush();
if (ps.rx_packets)
mvneta_update_stats(pp, &ps);
@@ -5725,7 +5725,7 @@ err_free_irq:
}
/* Device removal routine */
-static int mvneta_remove(struct platform_device *pdev)
+static void mvneta_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct mvneta_port *pp = netdev_priv(dev);
@@ -5744,8 +5744,6 @@ static int mvneta_remove(struct platform_device *pdev)
1 << pp->id);
mvneta_bm_put(pp->bm_priv);
}
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -5871,7 +5869,7 @@ MODULE_DEVICE_TABLE(of, mvneta_match);
static struct platform_driver mvneta_driver = {
.probe = mvneta_probe,
- .remove = mvneta_remove,
+ .remove_new = mvneta_remove,
.driver = {
.name = MVNETA_DRIVER_NAME,
.of_match_table = mvneta_match,
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 46c942ef2287..3f46a0fed048 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -457,7 +457,7 @@ err_clk:
return err;
}
-static int mvneta_bm_remove(struct platform_device *pdev)
+static void mvneta_bm_remove(struct platform_device *pdev)
{
struct mvneta_bm *priv = platform_get_drvdata(pdev);
u8 all_ports_map = 0xff;
@@ -475,8 +475,6 @@ static int mvneta_bm_remove(struct platform_device *pdev)
mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
static const struct of_device_id mvneta_bm_match[] = {
@@ -487,7 +485,7 @@ MODULE_DEVICE_TABLE(of, mvneta_bm_match);
static struct platform_driver mvneta_bm_driver = {
.probe = mvneta_bm_probe,
- .remove = mvneta_bm_remove,
+ .remove_new = mvneta_bm_remove,
.driver = {
.name = MVNETA_BM_DRIVER_NAME,
.of_match_table = mvneta_bm_match,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 21c3f9b015c8..f11a3f6540b0 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4027,7 +4027,7 @@ err_drop_frame:
}
if (xdp_ret & MVPP2_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (ps.rx_packets) {
struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
@@ -7662,7 +7662,7 @@ err_pp_clk:
return err;
}
-static int mvpp2_remove(struct platform_device *pdev)
+static void mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
struct fwnode_handle *fwnode = pdev->dev.fwnode;
@@ -7700,15 +7700,13 @@ static int mvpp2_remove(struct platform_device *pdev)
}
if (is_acpi_node(port_fwnode))
- return 0;
+ return;
clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(priv->mg_core_clk);
clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
clk_disable_unprepare(priv->gop_clk);
-
- return 0;
}
static const struct of_device_id mvpp2_match[] = {
@@ -7734,7 +7732,7 @@ MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
- .remove = mvpp2_remove,
+ .remove_new = mvpp2_remove,
.driver = {
.name = MVPP2_DRIVER_NAME,
.of_match_table = mvpp2_match,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
index 90c3a419932d..d4ee2454675b 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -16,9 +16,6 @@
#define CTRL_MBOX_MAX_PF 128
#define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
-#define FW_HB_INTERVAL_IN_SECS 1
-#define FW_HB_MISS_COUNT 10
-
/* Names of Hardware non-queue generic interrupts */
static char *cn93_non_ioq_msix_names[] = {
"epf_ire_rint",
@@ -250,12 +247,11 @@ static void octep_init_config_cn93_pf(struct octep_device *oct)
link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
}
conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
- (0x400000ull * 7) +
+ CN93_PEM_BAR4_INDEX_OFFSET +
(link * CTRL_MBOX_SZ);
- conf->hb_interval = FW_HB_INTERVAL_IN_SECS;
- conf->max_hb_miss_cnt = FW_HB_MISS_COUNT;
-
+ conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL;
+ conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT;
}
/* Setup registers for a hardware Tx Queue */
@@ -373,34 +369,40 @@ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
}
-/* Process non-ioq interrupts required to keep pf interface running.
- * OEI_RINT is needed for control mailbox
- */
-static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
-{
- bool handled = false;
- u64 reg0;
-
- /* Check for OEI INTR */
- reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
- if (reg0) {
- dev_info(&oct->pdev->dev,
- "Received OEI_RINT intr: 0x%llx\n",
- reg0);
- octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0);
- if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
+/* Poll OEI events like heartbeat */
+static void octep_poll_oei_cn93_pf(struct octep_device *oct)
+{
+ u64 reg;
+
+ reg = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ if (reg) {
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg);
+ if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
queue_work(octep_wq, &oct->ctrl_mbox_task);
- else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
+ else if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
atomic_set(&oct->hb_miss_cnt, 0);
-
- handled = true;
}
+}
+
+/* OEI interrupt handler */
+static irqreturn_t octep_oei_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_oei_cn93_pf(oct);
+ return IRQ_HANDLED;
+}
- return handled;
+/* Process non-ioq interrupts required to keep pf interface running.
+ * OEI_RINT is needed for control mailbox
+ */
+static void octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
+{
+ octep_poll_oei_cn93_pf(oct);
}
-/* Interrupts handler for all non-queue generic interrupts. */
-static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
+/* Interrupt handler for input ring error interrupts. */
+static irqreturn_t octep_ire_intr_handler_cn93_pf(void *dev)
{
struct octep_device *oct = (struct octep_device *)dev;
struct pci_dev *pdev = oct->pdev;
@@ -425,8 +427,17 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
reg_val);
}
}
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for output ring error interrupts. */
+static irqreturn_t octep_ore_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
/* Check for ORERR INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT);
@@ -444,9 +455,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
reg_val);
}
}
-
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf input ring error interrupts. */
+static irqreturn_t octep_vfire_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for VFIRE INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0));
@@ -454,8 +472,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received VFIRE_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf output ring error interrupts. */
+static irqreturn_t octep_vfore_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for VFORE INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0));
@@ -463,19 +489,30 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received VFORE_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
- /* Check for MBOX INTR and OEI INTR */
- if (octep_poll_non_ioq_interrupts_cn93_pf(oct))
- goto irq_handled;
+/* Interrupt handler for dpi dma related interrupts. */
+static irqreturn_t octep_dma_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ u64 reg_val = 0;
/* Check for DMA INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
if (reg_val) {
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for dpi dma transaction error interrupts for VFs */
+static irqreturn_t octep_dma_vf_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for DMA VF INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0));
@@ -483,8 +520,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for pp transaction error interrupts for VFs */
+static irqreturn_t octep_pp_vf_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for PPVF INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0));
@@ -492,8 +537,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received PP_VF_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for mac related interrupts. */
+static irqreturn_t octep_misc_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for MISC INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT);
@@ -501,11 +554,17 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received MISC_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupts handler for all reserved interrupts. */
+static irqreturn_t octep_rsvd_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
-irq_handled:
return IRQ_HANDLED;
}
@@ -569,8 +628,15 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL);
+
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
}
/* Disable all interrupts */
@@ -588,8 +654,15 @@ static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL);
+
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
}
/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
@@ -722,7 +795,16 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
- oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cn93_pf;
+ oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cn93_pf;
+ oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cn93_pf;
+ oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cn93_pf;
+ oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cn93_pf;
+ oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cn93_pf;
+ oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cn93_pf;
+ oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cn93_pf;
+ oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cn93_pf;
+ oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cn93_pf;
oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
index df7cd39d9fce..1622a6ebf036 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -49,6 +49,11 @@
/* Default MTU */
#define OCTEP_DEFAULT_MTU 1500
+/* pf heartbeat interval in milliseconds */
+#define OCTEP_DEFAULT_FW_HB_INTERVAL 1000
+/* pf heartbeat miss count */
+#define OCTEP_DEFAULT_FW_HB_MISS_COUNT 20
+
/* Macros to get octeon config params */
#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
@@ -181,6 +186,16 @@ struct octep_ctrl_mbox_config {
void __iomem *barmem_addr;
};
+/* Info from firmware */
+struct octep_fw_info {
+ /* interface pkind */
+ u16 pkind;
+ /* heartbeat interval in milliseconds */
+ u16 hb_interval;
+ /* heartbeat miss count */
+ u16 hb_miss_count;
+};
+
/* Data Structure to hold configuration limits and active config */
struct octep_config {
/* Input Queue attributes. */
@@ -201,10 +216,7 @@ struct octep_config {
/* ctrl mbox config */
struct octep_ctrl_mbox_config ctrl_mbox_cfg;
- /* Configured maximum heartbeat miss count */
- u32 max_hb_miss_cnt;
-
- /* Configured firmware heartbeat interval in secs */
- u32 hb_interval;
+ /* fw info */
+ struct octep_fw_info fw_info;
};
#endif /* _OCTEP_CONFIG_H_ */
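With these defaults the firmware is declared unresponsive after hb_miss_count consecutive misses, i.e. 20 x 1000 ms = 20 seconds. A hedged sketch of the rearm arithmetic, mirroring the octep_main.c hunk further down (handle_fw_dead() is hypothetical):

	if (atomic_inc_return(&oct->hb_miss_cnt) < oct->conf->fw_info.hb_miss_count)
		queue_delayed_work(octep_wq, &oct->hb_task,
				   msecs_to_jiffies(oct->conf->fw_info.hb_interval));
	else
		handle_fw_dead(oct);	/* hypothetical: ~20 s without a beat */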
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
index 17bfd5cdf462..0594607a2585 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -26,7 +26,7 @@ static atomic_t ctrl_net_msg_id;
/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */
static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = {
- [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_LINK_INFO] =
+ [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_GET_INFO] =
OCTEP_CP_VERSION(1, 0, 0)
};
@@ -353,6 +353,28 @@ void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
}
}
+int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
+ struct octep_fw_info *info)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_net_h2f_req *req;
+ int err;
+
+ req = &d.data.req;
+ init_send_req(&d.msg, req, 0, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_INFO;
+ req->link_info.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ resp = &d.data.resp;
+ memcpy(info, &resp->info.fw_info, sizeof(struct octep_fw_info));
+
+ return 0;
+}
+
int octep_ctrl_net_uninit(struct octep_device *oct)
{
struct octep_ctrl_net_wait_data *pos, *n;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
index 1c2ef4ee31d9..b330f370131b 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -41,6 +41,7 @@ enum octep_ctrl_net_h2f_cmd {
OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+ OCTEP_CTRL_NET_H2F_CMD_GET_INFO,
OCTEP_CTRL_NET_H2F_CMD_MAX
};
@@ -161,6 +162,11 @@ struct octep_ctrl_net_h2f_resp_cmd_state {
u16 state;
};
+/* get info response */
+struct octep_ctrl_net_h2f_resp_cmd_get_info {
+ struct octep_fw_info fw_info;
+};
+
/* Host to fw response data */
struct octep_ctrl_net_h2f_resp {
union octep_ctrl_net_resp_hdr hdr;
@@ -171,6 +177,7 @@ struct octep_ctrl_net_h2f_resp {
struct octep_ctrl_net_h2f_resp_cmd_state link;
struct octep_ctrl_net_h2f_resp_cmd_state rx;
struct octep_ctrl_net_link_info link_info;
+ struct octep_ctrl_net_h2f_resp_cmd_get_info info;
};
} __packed;
@@ -330,6 +337,17 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct,
*/
void octep_ctrl_net_recv_fw_messages(struct octep_device *oct);
+/** Get info from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param info: non-null pointer to struct octep_fw_info.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
+ struct octep_fw_info *info);
+
/** Uninitialize data for ctrl net.
*
* @param oct: non-null pointer to struct octep_device.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index dbc518ff8276..7a386af041ec 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -155,18 +155,153 @@ static void octep_disable_msix(struct octep_device *oct)
}
/**
- * octep_non_ioq_intr_handler() - common handler for all generic interrupts.
+ * octep_oei_intr_handler() - common handler for output endpoint interrupts.
*
* @irq: Interrupt number.
* @data: interrupt data.
*
- * this is common handler for all non-queue (generic) interrupts.
+ * this is common handler for all output endpoint interrupts.
+ */
+static irqreturn_t octep_oei_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.oei_intr_handler(oct);
+}
+
+/**
+ * octep_ire_intr_handler() - common handler for input ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for input ring error interrupts.
+ */
+static irqreturn_t octep_ire_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.ire_intr_handler(oct);
+}
+
+/**
+ * octep_ore_intr_handler() - common handler for output ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for output ring error interrupts.
+ */
+static irqreturn_t octep_ore_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.ore_intr_handler(oct);
+}
+
+/**
+ * octep_vfire_intr_handler() - common handler for vf input ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for vf input ring error interrupts.
+ */
+static irqreturn_t octep_vfire_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.vfire_intr_handler(oct);
+}
+
+/**
+ * octep_vfore_intr_handler() - common handler for vf output ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for vf output ring error interrupts.
+ */
+static irqreturn_t octep_vfore_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.vfore_intr_handler(oct);
+}
+
+/**
+ * octep_dma_intr_handler() - common handler for dpi dma related interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for dpi dma related interrupts.
+ */
+static irqreturn_t octep_dma_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.dma_intr_handler(oct);
+}
+
+/**
+ * octep_dma_vf_intr_handler() - common handler for dpi dma transaction error interrupts for VFs.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for dpi dma transaction error interrupts for VFs.
*/
-static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data)
+static irqreturn_t octep_dma_vf_intr_handler(int irq, void *data)
{
struct octep_device *oct = data;
- return oct->hw_ops.non_ioq_intr_handler(oct);
+ return oct->hw_ops.dma_vf_intr_handler(oct);
+}
+
+/**
+ * octep_pp_vf_intr_handler() - common handler for pp transaction error interrupts for VFs.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for pp transaction error interrupts for VFs.
+ */
+static irqreturn_t octep_pp_vf_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.pp_vf_intr_handler(oct);
+}
+
+/**
+ * octep_misc_intr_handler() - common handler for mac related interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for mac related interrupts.
+ */
+static irqreturn_t octep_misc_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.misc_intr_handler(oct);
+}
+
+/**
+ * octep_rsvd_intr_handler() - common handler for reserved interrupts (future use).
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for all reserved interrupts.
+ */
+static irqreturn_t octep_rsvd_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.rsvd_intr_handler(oct);
}
/**
@@ -222,9 +357,57 @@ static int octep_request_irqs(struct octep_device *oct)
snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
"%s-%s", netdev->name, non_ioq_msix_names[i]);
- ret = request_irq(msix_entry->vector,
- octep_non_ioq_intr_handler, 0,
- irq_name, oct);
+ if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
+ strlen("epf_oei_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_oei_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_ire_rint",
+ strlen("epf_ire_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_ire_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_ore_rint",
+ strlen("epf_ore_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_ore_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_vfire_rint",
+ strlen("epf_vfire_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_vfire_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_vfore_rint",
+ strlen("epf_vfore_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_vfore_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_dma_rint",
+ strlen("epf_dma_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_dma_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_dma_vf_rint",
+ strlen("epf_dma_vf_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_dma_vf_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_pp_vf_rint",
+ strlen("epf_pp_vf_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_pp_vf_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_misc_rint",
+ strlen("epf_misc_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_misc_intr_handler, 0,
+ irq_name, oct);
+ } else {
+ ret = request_irq(msix_entry->vector,
+ octep_rsvd_intr_handler, 0,
+ irq_name, oct);
+ }
+
if (ret) {
netdev_err(netdev,
"request_irq failed for %s; err=%d",
@@ -918,9 +1101,9 @@ static void octep_hb_timeout_task(struct work_struct *work)
int miss_cnt;
miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
- if (miss_cnt < oct->conf->max_hb_miss_cnt) {
+ if (miss_cnt < oct->conf->fw_info.hb_miss_count) {
queue_delayed_work(octep_wq, &oct->hb_task,
- msecs_to_jiffies(oct->conf->hb_interval * 1000));
+ msecs_to_jiffies(oct->conf->fw_info.hb_interval));
return;
}
@@ -1013,8 +1196,7 @@ int octep_device_setup(struct octep_device *oct)
atomic_set(&oct->hb_miss_cnt, 0);
INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
- queue_delayed_work(octep_wq, &oct->hb_task,
- msecs_to_jiffies(oct->conf->hb_interval * 1000));
+
return 0;
unsupported_dev:
@@ -1143,6 +1325,15 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "Device setup failed\n");
goto err_octep_config;
}
+
+ octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
+ &octep_dev->conf->fw_info);
+ dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n",
+ octep_dev->conf->fw_info.hb_interval,
+ octep_dev->conf->fw_info.hb_miss_count);
+ queue_delayed_work(octep_wq, &octep_dev->hb_task,
+ msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval));
+
INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task);
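The strncmp ladder in octep_request_irqs() could equally be table-driven; a hedged sketch of that alternative (not what the patch does, reusing the handlers added above):

	static const struct {
		const char *name;
		irq_handler_t handler;
	} example_irq_map[] = {
		{ "epf_oei_rint", octep_oei_intr_handler },
		{ "epf_ire_rint", octep_ire_intr_handler },
		{ "epf_ore_rint", octep_ore_intr_handler },
		/* ...remaining prefixes... */
	};

	irq_handler_t handler = octep_rsvd_intr_handler;	/* default */
	int j;

	for (j = 0; j < ARRAY_SIZE(example_irq_map); j++) {
		if (!strncmp(non_ioq_msix_names[i], example_irq_map[j].name,
			     strlen(example_irq_map[j].name))) {
			handler = example_irq_map[j].handler;
			break;
		}
	}
	ret = request_irq(msix_entry->vector, handler, 0, irq_name, oct);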
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index e0907a719133..6df902ebb7f3 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -65,7 +65,16 @@ struct octep_hw_ops {
void (*setup_oq_regs)(struct octep_device *oct, int q);
void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
- irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*oei_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ire_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ore_intr_handler)(void *ioq_vector);
+ irqreturn_t (*vfire_intr_handler)(void *ioq_vector);
+ irqreturn_t (*vfore_intr_handler)(void *ioq_vector);
+ irqreturn_t (*dma_intr_handler)(void *ioq_vector);
+ irqreturn_t (*dma_vf_intr_handler)(void *ioq_vector);
+ irqreturn_t (*pp_vf_intr_handler)(void *ioq_vector);
+ irqreturn_t (*misc_intr_handler)(void *ioq_vector);
+ irqreturn_t (*rsvd_intr_handler)(void *ioq_vector);
irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
int (*soft_reset)(struct octep_device *oct);
void (*reinit_regs)(struct octep_device *oct);
@@ -73,7 +82,7 @@ struct octep_hw_ops {
void (*enable_interrupts)(struct octep_device *oct);
void (*disable_interrupts)(struct octep_device *oct);
- bool (*poll_non_ioq_interrupts)(struct octep_device *oct);
+ void (*poll_non_ioq_interrupts)(struct octep_device *oct);
void (*enable_io_queues)(struct octep_device *oct);
void (*disable_io_queues)(struct octep_device *oct);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index b25c3093dc7b..0a43983e9101 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -370,4 +370,8 @@
/* bit 1 for firmware heartbeat interrupt */
#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+#define CN93_PEM_BAR4_INDEX 7
+#define CN93_PEM_BAR4_INDEX_SIZE 0x400000ULL
+#define CN93_PEM_BAR4_INDEX_OFFSET (CN93_PEM_BAR4_INDEX * CN93_PEM_BAR4_INDEX_SIZE)
+
#endif /* _OCTEP_REGS_CN9K_PF_H_ */
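These macros name the magic constant from the cn93 setup hunk above: BAR4 index window 7, each window 4 MiB, so the control mailbox region starts 7 x 0x400000 = 0x01C00000 bytes in. A compile-time check, as an illustration:

	static_assert(CN93_PEM_BAR4_INDEX_OFFSET == 0x01C00000ULL);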
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 6b5b06c2b4e9..6845556581c3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1473,6 +1473,12 @@ struct flow_msg {
u8 next_header;
};
__be16 vlan_itci;
+#define OTX2_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
+#define OTX2_FLOWER_MASK_MPLS_TC GENMASK(11, 9)
+#define OTX2_FLOWER_MASK_MPLS_BOS BIT(8)
+#define OTX2_FLOWER_MASK_MPLS_TTL GENMASK(7, 0)
+#define OTX2_FLOWER_MASK_MPLS_NON_TTL GENMASK(31, 8)
+ u32 mpls_lse[4];
};
struct npc_install_flow_req {
@@ -1574,7 +1580,7 @@ enum ptp_op {
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
- PTP_OP_EXTTS_ON = 4,
+ PTP_OP_PPS_ON = 4,
PTP_OP_ADJTIME = 5,
PTP_OP_SET_CLOCK = 6,
};
@@ -1584,7 +1590,8 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
- int extts_on;
+ u64 period;
+ int pps_on;
s64 delta;
u64 clk;
};
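The new masks in struct flow_msg follow the on-wire MPLS label stack entry: label [31:12], TC [11:9], bottom-of-stack [8], TTL [7:0]. A hedged sketch of packing one LSE word with FIELD_PREP(), as the otx2 TC hunk below does (the field variables are illustrative):

	u32 lse = FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB, label) |
		  FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC, tc) |
		  FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS, bos) |
		  FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL, ttl);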
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index de9fbd98dfb7..ab3e39eef2eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -206,6 +206,14 @@ enum key_fields {
NPC_SPORT_SCTP,
NPC_DPORT_SCTP,
NPC_IPSEC_SPI,
+ NPC_MPLS1_LBTCBOS,
+ NPC_MPLS1_TTL,
+ NPC_MPLS2_LBTCBOS,
+ NPC_MPLS2_TTL,
+ NPC_MPLS3_LBTCBOS,
+ NPC_MPLS3_TTL,
+ NPC_MPLS4_LBTCBOS,
+ NPC_MPLS4_TTL,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index ffbd22797163..bcc96eed2481 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -46,6 +46,7 @@
#define PTP_PPS_HI_INCR 0xF60ULL
#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_LO 0xF50ULL
#define PTP_PPS_THRESH_HI 0xF58ULL
#define PTP_CLOCK_LO 0xF08ULL
@@ -411,29 +412,12 @@ void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts)
}
clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
- clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
clock_cfg |= (ATOMIC_SET << 26);
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
- /* Set 50% duty cycle for 1Hz output */
- writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
- writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
- if (cn10k_ptp_errata(ptp)) {
- /* The ptp_clock_hi rollsover to zero once clock cycle before it
- * reaches one second boundary. so, program the pps_lo_incr in
- * such a way that the pps threshold value comparison at one
- * second boundary will succeed and pps edge changes. After each
- * one second boundary, the hrtimer handler will be invoked and
- * reprograms the pps threshold value.
- */
- ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
- writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
- ptp->reg_base + PTP_PPS_LO_INCR);
- }
-
if (cn10k_ptp_errata(ptp))
clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
else
@@ -465,20 +449,68 @@ static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
return 0;
}
-static int ptp_extts_on(struct ptp *ptp, int on)
+static int ptp_config_hrtimer(struct ptp *ptp, int on)
{
u64 ptp_clock_hi;
- if (cn10k_ptp_errata(ptp)) {
- if (on) {
- ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
- ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
- } else {
- if (hrtimer_active(&ptp->hrtimer))
- hrtimer_cancel(&ptp->hrtimer);
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+
+ return 0;
+}
+
+static int ptp_pps_on(struct ptp *ptp, int on, u64 period)
+{
+ u64 clock_cfg;
+
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ if (on) {
+ if (cn10k_ptp_errata(ptp) && period != NSEC_PER_SEC) {
+ dev_err(&ptp->pdev->dev, "Supports max period value as 1 second\n");
+ return -EINVAL;
}
+
+ if (period > (8 * NSEC_PER_SEC)) {
+ dev_err(&ptp->pdev->dev, "Supports max period as 8 seconds\n");
+ return -EINVAL;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ writeq(0, ptp->reg_base + PTP_PPS_THRESH_HI);
+ writeq(0, ptp->reg_base + PTP_PPS_THRESH_LO);
+
+ /* Configure high/low phase time */
+ period = period / 2;
+ writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_LO_INCR);
+ } else {
+ clock_cfg &= ~(PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV);
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
}
+ if (on && cn10k_ptp_errata(ptp)) {
+ /* The ptp_clock_hi rollsover to zero once clock cycle before it
+ * reaches one second boundary. so, program the pps_lo_incr in
+ * such a way that the pps threshold value comparison at one
+ * second boundary will succeed and pps edge changes. After each
+ * one second boundary, the hrtimer handler will be invoked and
+ * reprograms the pps threshold value.
+ */
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
+ }
+
+ if (cn10k_ptp_errata(ptp))
+ ptp_config_hrtimer(ptp, on);
+
return 0;
}
@@ -613,8 +645,8 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
- case PTP_OP_EXTTS_ON:
- err = ptp_extts_on(rvu->ptp, req->extts_on);
+ case PTP_OP_PPS_ON:
+ err = ptp_pps_on(rvu->ptp, req->pps_on, req->period);
break;
case PTP_OP_ADJTIME:
ptp_atomic_adjtime(rvu->ptp, req->delta);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index d30e84803481..bd817ee88735 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2756,6 +2756,27 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
+#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask) \
+do { \
+ seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt)); \
+ seq_printf(s, "mask 0x%lx\n", \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask)); \
+} while (0)
+
+#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask) \
+do { \
+ typeof(_pkt) (pkt) = (_pkt); \
+ typeof(_mask) (mask) = (_mask); \
+ seq_printf(s, "%ld %ld %ld\n", \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt)); \
+ seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n", \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask)); \
+} while (0)
+
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
{
@@ -2836,6 +2857,38 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
break;
+ case NPC_MPLS1_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
+ rule->mask.mpls_lse[0]);
+ break;
+ case NPC_MPLS1_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
+ rule->mask.mpls_lse[0]);
+ break;
+ case NPC_MPLS2_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
+ rule->mask.mpls_lse[1]);
+ break;
+ case NPC_MPLS2_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
+ rule->mask.mpls_lse[1]);
+ break;
+ case NPC_MPLS3_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
+ rule->mask.mpls_lse[2]);
+ break;
+ case NPC_MPLS3_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
+ rule->mask.mpls_lse[2]);
+ break;
+ case NPC_MPLS4_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
+ rule->mask.mpls_lse[3]);
+ break;
+ case NPC_MPLS4_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
+ rule->mask.mpls_lse[3]);
+ break;
default:
seq_puts(s, "\n");
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 237f82082ebe..114e4ec21802 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -43,6 +43,14 @@ static const char * const npc_flow_names[] = {
[NPC_DPORT_SCTP] = "sctp destination port",
[NPC_LXMB] = "Mcast/Bcast header ",
[NPC_IPSEC_SPI] = "SPI ",
+ [NPC_MPLS1_LBTCBOS] = "lse depth 1 label tc bos",
+ [NPC_MPLS1_TTL] = "lse depth 1 ttl",
+ [NPC_MPLS2_LBTCBOS] = "lse depth 2 label tc bos",
+ [NPC_MPLS2_TTL] = "lse depth 2 ttl",
+ [NPC_MPLS3_LBTCBOS] = "lse depth 3 label tc bos",
+ [NPC_MPLS3_TTL] = "lse depth 3 ttl",
+ [NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos",
+ [NPC_MPLS4_TTL] = "lse depth 4 ttl",
[NPC_UNKNOWN] = "unknown",
};
@@ -528,6 +536,14 @@ do { \
NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4);
NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4);
+ NPC_SCAN_HDR(NPC_MPLS1_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 0, 3);
+ NPC_SCAN_HDR(NPC_MPLS1_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 3, 1);
+ NPC_SCAN_HDR(NPC_MPLS2_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 4, 3);
+ NPC_SCAN_HDR(NPC_MPLS2_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 7, 1);
+ NPC_SCAN_HDR(NPC_MPLS3_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 8, 3);
+ NPC_SCAN_HDR(NPC_MPLS3_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 11, 1);
+ NPC_SCAN_HDR(NPC_MPLS4_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 12, 3);
+ NPC_SCAN_HDR(NPC_MPLS4_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 15, 1);
/* SMAC follows the DMAC(which is 6 bytes) */
NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
@@ -593,6 +609,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
/* for L2M/L2B/L3M/L3B, check if the type is present in the key */
if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
*features |= BIT_ULL(NPC_LXMB);
+
+ for (hdr = NPC_MPLS1_LBTCBOS; hdr <= NPC_MPLS4_TTL; hdr++) {
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
+ *features |= BIT_ULL(hdr);
+ }
}
/* Scan key extraction profile and record how fields of our interest
@@ -959,6 +980,47 @@ do { \
NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0,
ntohs(mask->vlan_itci), 0);
+ NPC_WRITE_FLOW(NPC_MPLS1_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[0]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[0]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS1_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[0]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[0]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS2_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[1]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[1]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS2_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[1]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[1]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS3_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[2]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[2]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS3_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[2]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[2]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS4_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[3]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[3]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS4_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[3]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[3]), 0);
+
NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
mask->next_header, 0);
npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 3a72b0793d4a..63130ba37e9d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -175,7 +175,7 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static int ptp_extts_on(struct otx2_ptp *ptp, int on)
+static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period)
{
struct ptp_req *req;
@@ -186,8 +186,9 @@ static int ptp_extts_on(struct otx2_ptp *ptp, int on)
if (!req)
return -ENOMEM;
- req->op = PTP_OP_EXTTS_ON;
- req->extts_on = on;
+ req->op = PTP_OP_PPS_ON;
+ req->pps_on = on;
+ req->period = period;
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
@@ -276,8 +277,8 @@ static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
switch (func) {
case PTP_PF_NONE:
case PTP_PF_EXTTS:
- break;
case PTP_PF_PEROUT:
+ break;
case PTP_PF_PHYSYNC:
return -1;
}
@@ -340,6 +341,7 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
+ u64 period = 0;
int pin;
if (!ptp->nic)
@@ -351,12 +353,24 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
- if (on) {
- ptp_extts_on(ptp, on);
+ if (on)
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
- } else {
- ptp_extts_on(ptp, on);
+ else
cancel_delayed_work_sync(&ptp->extts_work);
+
+ return 0;
+ case PTP_CLK_REQ_PEROUT:
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index >= ptp_info->n_pins)
+ return -EINVAL;
+ if (on) {
+ period = rq->perout.period.sec * NSEC_PER_SEC +
+ rq->perout.period.nsec;
+ ptp_pps_on(ptp, on, period);
+ } else {
+ ptp_pps_on(ptp, on, period);
}
return 0;
default:
@@ -411,6 +425,7 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
.n_ext_ts = 1,
+ .n_per_out = 1,
.n_pins = 1,
.pps = 0,
.pin_config = &ptp_ptr->extts_config,
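The PEROUT request carries the period as seconds plus nanoseconds; the driver flattens it to nanoseconds before handing it to the AF, which splits it into two half-period phases for a 50% duty cycle. The arithmetic, as a sketch:

	/* e.g. 1 Hz output: period = 1e9 ns, each phase 5e8 ns;
	 * the AF writes (phase << 32) to PTP_PPS_{HI,LO}_INCR.
	 */
	u64 period = rq->perout.period.sec * NSEC_PER_SEC +
		     rq->perout.period.nsec;
	u64 phase = period / 2;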
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index fab9d85bfb37..8a5e3987a482 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -27,6 +27,8 @@
#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44)
+#define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -519,6 +521,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -738,6 +741,61 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_match_mpls match;
+ u8 bit;
+
+ flow_rule_match_mpls(rule, &match);
+
+ if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported LSE depth for MPLS match offload");
+ return -EOPNOTSUPP;
+ }
+
+ for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses,
+ FLOW_DIS_MPLS_MAX) {
+ /* check if any of the fields LABEL,TC,BOS are set */
+ if (*((u32 *)&match.mask->ls[bit]) &
+ OTX2_FLOWER_MASK_MPLS_NON_TTL) {
+ /* Hardware will capture 4 byte MPLS header into
+ * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
+ * Derive the associated NPC key based on header
+ * index and offset.
+ */
+
+ req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
+ 2 * bit);
+ flow_spec->mpls_lse[bit] =
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
+ match.key->ls[bit].mpls_label) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
+ match.key->ls[bit].mpls_tc) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
+ match.key->ls[bit].mpls_bos);
+
+ flow_mask->mpls_lse[bit] =
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
+ match.mask->ls[bit].mpls_label) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
+ match.mask->ls[bit].mpls_tc) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
+ match.mask->ls[bit].mpls_bos);
+ }
+
+ if (match.mask->ls[bit].mpls_ttl) {
+ req->features |= BIT_ULL(NPC_MPLS1_TTL +
+ 2 * bit);
+ flow_spec->mpls_lse[bit] |=
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
+ match.key->ls[bit].mpls_ttl);
+ flow_mask->mpls_lse[bit] |=
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
+ match.mask->ls[bit].mpls_ttl);
+ }
+ }
+ }
+
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
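The "+ 2 * bit" feature-bit arithmetic relies on the npc.h enum above interleaving the per-depth entries, so each LSE depth maps to a fixed stride. A hedged sketch:

	/* Feature bits for MPLS LSE depth i (0..3): the enum places
	 * LBTCBOS and TTL entries back to back, two per depth.
	 */
	u64 lbtcbos = BIT_ULL(NPC_MPLS1_LBTCBOS + 2 * i);
	u64 ttl = BIT_ULL(NPC_MPLS1_TTL + 2 * i);	/* == lbtcbos << 1 */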
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index d5691b6a2bc5..dd6ca2e4fd51 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1528,7 +1528,7 @@ err_clk:
return err;
}
-static int pxa168_eth_remove(struct platform_device *pdev)
+static void pxa168_eth_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -1547,7 +1547,6 @@ static int pxa168_eth_remove(struct platform_device *pdev)
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
free_netdev(dev);
- return 0;
}
static void pxa168_eth_shutdown(struct platform_device *pdev)
@@ -1580,7 +1579,7 @@ MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);
static struct platform_driver pxa168_eth_driver = {
.probe = pxa168_eth_probe,
- .remove = pxa168_eth_remove,
+ .remove_new = pxa168_eth_remove,
.shutdown = pxa168_eth_shutdown,
.resume = pxa168_eth_resume,
.suspend = pxa168_eth_suspend,
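Both remove() conversions in this series follow the same mechanical pattern: the callback drops its always-zero return value and is wired up through .remove_new, the void-returning hook platform drivers gained for exactly this transition. A minimal sketch of the target shape, with hypothetical driver names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* tear-down only; a void remove cannot propagate errors, so
	 * any failure has to be logged rather than returned
	 */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");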
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 20afe79f380a..60d49b0f595f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -197,6 +197,7 @@ static const struct mtk_reg_map mt7988_reg_map = {
.wdma_base = {
[0] = 0x4800,
[1] = 0x4c00,
+ [2] = 0x5000,
},
.pse_iq_sta = 0x0180,
.pse_oq_sta = 0x01a0,
@@ -2210,7 +2211,7 @@ rx_done:
net_dim(&eth->rx_dim, dim_sample);
if (xdp_flush)
- xdp_do_flush_map();
+ xdp_do_flush();
return done;
}
@@ -5002,7 +5003,7 @@ err_destroy_sgmii:
return err;
}
-static int mtk_remove(struct platform_device *pdev)
+static void mtk_remove(struct platform_device *pdev)
{
struct mtk_eth *eth = platform_get_drvdata(pdev);
struct mtk_mac *mac;
@@ -5024,8 +5025,6 @@ static int mtk_remove(struct platform_device *pdev)
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
mtk_mdio_cleanup(eth);
-
- return 0;
}
static const struct mtk_soc_data mt2701_data = {
@@ -5226,7 +5225,7 @@ MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,
- .remove = mtk_remove,
+ .remove_new = mtk_remove,
.driver = {
.name = "mtk_soc_eth",
.of_match_table = of_mtk_match,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 403219d987ef..9ae3b8a71d0e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1132,7 +1132,7 @@ struct mtk_reg_map {
u32 gdm1_cnt;
u32 gdma_to_ppe;
u32 ppe_base;
- u32 wdma_base[2];
+ u32 wdma_base[3];
u32 pse_iq_sta;
u32 pse_oq_sta;
};
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 86f32f486043..b2a5d9c3733d 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -425,7 +425,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
}
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
- int wdma_idx, int txq, int bss, int wcid)
+ int wdma_idx, int txq, int bss, int wcid,
+ bool amsdu_en)
{
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
@@ -437,6 +438,7 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
MTK_FOE_IB2_WDMA_WINFO_V2;
l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
+ l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
break;
case 2:
*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
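Note how amsdu_en is folded into l2->amsdu above: FIELD_PREP() with a single-bit mask maps the bool onto 0 or BIT(24) without an explicit conditional. A tiny illustration of that idiom; the mask value below mirrors the MTK_FOE_WINFO_AMSDU_EN definition from the header changes that follow:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

#define EXAMPLE_AMSDU_EN	BIT(24)	/* mirrors MTK_FOE_WINFO_AMSDU_EN */

static u32 winfo_amsdu(bool amsdu_en)
{
	/* a bool maps cleanly onto a one-bit field: 0 or BIT(24) */
	return FIELD_PREP(EXAMPLE_AMSDU_EN, amsdu_en);
}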
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index e3d0ec72bc69..691806bca372 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -88,13 +88,13 @@ enum {
#define MTK_FOE_WINFO_BSS_V3 GENMASK(23, 16)
#define MTK_FOE_WINFO_WCID_V3 GENMASK(15, 0)
-#define MTK_FOE_WINFO_PAO_USR_INFO GENMASK(15, 0)
-#define MTK_FOE_WINFO_PAO_TID GENMASK(19, 16)
-#define MTK_FOE_WINFO_PAO_IS_FIXEDRATE BIT(20)
-#define MTK_FOE_WINFO_PAO_IS_PRIOR BIT(21)
-#define MTK_FOE_WINFO_PAO_IS_SP BIT(22)
-#define MTK_FOE_WINFO_PAO_HF BIT(23)
-#define MTK_FOE_WINFO_PAO_AMSDU_EN BIT(24)
+#define MTK_FOE_WINFO_AMSDU_USR_INFO GENMASK(15, 0)
+#define MTK_FOE_WINFO_AMSDU_TID GENMASK(19, 16)
+#define MTK_FOE_WINFO_AMSDU_IS_FIXEDRATE BIT(20)
+#define MTK_FOE_WINFO_AMSDU_IS_PRIOR BIT(21)
+#define MTK_FOE_WINFO_AMSDU_IS_SP BIT(22)
+#define MTK_FOE_WINFO_AMSDU_HF BIT(23)
+#define MTK_FOE_WINFO_AMSDU_EN BIT(24)
enum {
MTK_FOE_STATE_INVALID,
@@ -123,7 +123,7 @@ struct mtk_foe_mac_info {
/* netsys_v3 */
u32 w3info;
- u32 wpao;
+ u32 amsdu;
};
/* software-only entry type */
@@ -392,7 +392,8 @@ int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int sid);
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
- int wdma_idx, int txq, int bss, int wcid);
+ int wdma_idx, int txq, int bss, int wcid,
+ bool amsdu_en);
int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
unsigned int queue);
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index a4efbeb16208..e073d2b5542c 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -111,6 +111,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
info->queue = path->mtk_wdma.queue;
info->bss = path->mtk_wdma.bss;
info->wcid = path->mtk_wdma.wcid;
+ info->amsdu = path->mtk_wdma.amsdu;
return 0;
}
@@ -192,14 +193,17 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
- info.bss, info.wcid);
+ info.bss, info.wcid, info.amsdu);
if (mtk_is_netsys_v2_or_greater(eth)) {
switch (info.wdma_idx) {
case 0:
- pse_port = 8;
+ pse_port = PSE_WDMA0_PORT;
break;
case 1:
- pse_port = 9;
+ pse_port = PSE_WDMA1_PORT;
+ break;
+ case 2:
+ pse_port = PSE_WDMA2_PORT;
break;
default:
return -EINVAL;
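The mtk_wed.c changes below lean heavily on read_poll_timeout()-style helpers to wait for busy bits to clear after disabling a block. A minimal sketch of that pattern using the readl variant from linux/iopoll.h; the register and flag names are hypothetical:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_BUSY	BIT(0)	/* hypothetical busy flag */

static int example_wait_idle(void __iomem *reg)
{
	u32 status;

	/* poll every 10us, give up after 10ms; returns -ETIMEDOUT if
	 * the busy bit never clears, 0 otherwise
	 */
	return readl_poll_timeout(reg, status, !(status & EXAMPLE_BUSY),
				  10, 10000);
}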
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 94376aa2b34c..9a6744c0d458 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -17,17 +17,21 @@
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include "mtk_eth_soc.h"
-#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
#include "mtk_wed_wo.h"
#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
-#define MTK_WED_PKT_SIZE 1900
+#define MTK_WED_PKT_SIZE 1920
#define MTK_WED_BUF_SIZE 2048
+#define MTK_WED_PAGE_BUF_SIZE 128
#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
#define MTK_WED_RX_RING_SIZE 1536
+#define MTK_WED_RX_PG_BM_CNT 8192
+#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
+#define MTK_WED_AMSDU_NPAGES 32
#define MTK_WED_TX_RING_SIZE 2048
#define MTK_WED_WDMA_RING_SIZE 1024
@@ -41,7 +45,10 @@
#define MTK_WED_RRO_QUE_CNT 8192
#define MTK_WED_MIOD_ENTRY_CNT 128
-static struct mtk_wed_hw *hw_list[2];
+#define MTK_WED_TX_BM_DMA_SIZE 65536
+#define MTK_WED_TX_BM_PKT_CNT 32768
+
+static struct mtk_wed_hw *hw_list[3];
static DEFINE_MUTEX(hw_lock);
struct mtk_wed_flow_block_priv {
@@ -49,6 +56,39 @@ struct mtk_wed_flow_block_priv {
struct net_device *dev;
};
+static const struct mtk_wed_soc_data mt7622_data = {
+ .regmap = {
+ .tx_bm_tkid = 0x088,
+ .wpdma_rx_ring0 = 0x770,
+ .reset_idx_tx_mask = GENMASK(3, 0),
+ .reset_idx_rx_mask = GENMASK(17, 16),
+ },
+ .tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
+ .wdma_desc_size = sizeof(struct mtk_wdma_desc),
+};
+
+static const struct mtk_wed_soc_data mt7986_data = {
+ .regmap = {
+ .tx_bm_tkid = 0x0c8,
+ .wpdma_rx_ring0 = 0x770,
+ .reset_idx_tx_mask = GENMASK(1, 0),
+ .reset_idx_rx_mask = GENMASK(7, 6),
+ },
+ .tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
+ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
+};
+
+static const struct mtk_wed_soc_data mt7988_data = {
+ .regmap = {
+ .tx_bm_tkid = 0x0c8,
+ .wpdma_rx_ring0 = 0x7d0,
+ .reset_idx_tx_mask = GENMASK(1, 0),
+ .reset_idx_rx_mask = GENMASK(7, 6),
+ },
+ .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc),
+ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
+};
+
static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
@@ -109,6 +149,90 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}
+static void
+mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev)
+{
+ u32 status;
+
+ if (!mtk_wed_is_v3_or_greater(dev->hw))
+ return;
+
+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+ wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ /* prefetch FIFO */
+ wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
+ MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
+ MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
+ wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
+ MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
+ MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
+
+ /* core FIFO */
+ wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
+ wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
+
+ /* writeback FIFO */
+ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
+ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
+ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+
+ wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
+ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
+ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+
+ /* prefetch ring status */
+ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
+ MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
+ MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
+
+ /* writeback ring status */
+ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
+ MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
+ MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
+}
+
static int
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
@@ -121,6 +245,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
if (ret)
dev_err(dev->hw->dev, "rx reset failed\n");
+ mtk_wdma_v3_rx_reset(dev);
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
@@ -135,6 +260,101 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
return ret;
}
+static u32
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return !!(wed_r32(dev, reg) & mask);
+}
+
+static int
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ int sleep = 15000;
+ int timeout = 100 * sleep;
+ u32 val;
+
+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+ timeout, false, dev, reg, mask);
+}
+
+static void
+mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev)
+{
+ u32 status;
+
+ if (!mtk_wed_is_v3_or_greater(dev->hw))
+ return;
+
+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+ wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ /* prefetch FIFO */
+ wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
+ MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
+ MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
+ wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
+ MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
+ MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
+
+ /* core FIFO */
+ wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
+ wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
+
+ /* writeback FIFO */
+ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
+ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
+ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+
+ wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
+ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
+ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+
+ /* prefetch ring status */
+ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
+ MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
+ MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
+
+ /* writeback ring status */
+ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
+ MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
+ MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
+}
+
static void
mtk_wdma_tx_reset(struct mtk_wed_device *dev)
{
@@ -146,6 +366,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
!(status & mask), 0, 10000))
dev_err(dev->hw->dev, "tx reset failed\n");
+ mtk_wdma_v3_tx_reset(dev);
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
@@ -278,7 +499,7 @@ mtk_wed_assign(struct mtk_wed_device *dev)
if (!hw->wed_dev)
goto out;
- if (hw->version == 1)
+ if (mtk_wed_is_v1(hw))
return NULL;
/* MT7986 WED devices do not have any pcie slot restrictions */
@@ -298,35 +519,152 @@ out:
}
static int
+mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_hw *hw = dev->hw;
+ struct mtk_wed_amsdu *wed_amsdu;
+ int i;
+
+ if (!mtk_wed_is_v3_or_greater(hw))
+ return 0;
+
+ wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES,
+ sizeof(*wed_amsdu), GFP_KERNEL);
+ if (!wed_amsdu)
+ return -ENOMEM;
+
+ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
+ void *ptr;
+
+ /* each segment is 64K */
+ ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_COMP |
+ GFP_DMA32,
+ get_order(MTK_WED_AMSDU_BUF_SIZE));
+ if (!ptr)
+ goto error;
+
+ wed_amsdu[i].txd = ptr;
+ wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr,
+ MTK_WED_AMSDU_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy))
+ goto error;
+ }
+ dev->hw->wed_amsdu = wed_amsdu;
+
+ return 0;
+
+error:
+ for (i--; i >= 0; i--)
+ dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy,
+ MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+static void
+mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
+ int i;
+
+ if (!wed_amsdu)
+ return;
+
+ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
+ dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy,
+ MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
+ free_pages((unsigned long)wed_amsdu[i].txd,
+ get_order(MTK_WED_AMSDU_BUF_SIZE));
+ }
+}
+
+static int
+mtk_wed_amsdu_init(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
+ int i, ret;
+
+ if (!wed_amsdu)
+ return 0;
+
+ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++)
+ wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i),
+ wed_amsdu[i].txd_phy);
+
+ /* init all sta parameters */
+ wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL |
+ MTK_WED_AMSDU_STA_WTBL_HDRT_MODE |
+ FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN,
+ dev->wlan.amsdu_max_len >> 8) |
+ FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM,
+ dev->wlan.amsdu_max_subframes));
+
+ wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT);
+
+ ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO,
+ MTK_WED_AMSDU_STA_INFO_DO_INIT);
+ if (ret) {
+ dev_err(dev->hw->dev, "amsdu initialization failed\n");
+ return ret;
+ }
+
+ /* init partial amsdu offload txd src */
+ wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG,
+ FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index));
+
+ /* init qmem */
+ wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29));
+ if (ret) {
+ pr_info("%s: amsdu qmem initialization failed\n", __func__);
+ return ret;
+ }
+
+ /* eagle E1 PCIE1 tx ring 22 flow control issue */
+ if (dev->wlan.id == 0x7991)
+ wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
+
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
+
+ return 0;
+}
+
+static int
mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
- struct mtk_wdma_desc *desc;
- dma_addr_t desc_phys;
- void **page_list;
+ u32 desc_size = dev->hw->soc->tx_ring_desc_size;
+ int i, page_idx = 0, n_pages, ring_size;
int token = dev->wlan.token_start;
- int ring_size;
- int n_pages;
- int i, page_idx;
+ struct mtk_wed_buf *page_list;
+ dma_addr_t desc_phys;
+ void *desc_ptr;
- ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
- n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
+ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+ dev->tx_buf_ring.size = ring_size;
+ } else {
+ dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE;
+ ring_size = MTK_WED_TX_BM_PKT_CNT;
+ }
+ n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE;
page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
return -ENOMEM;
- dev->tx_buf_ring.size = ring_size;
dev->tx_buf_ring.pages = page_list;
- desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
- &desc_phys, GFP_KERNEL);
- if (!desc)
+ desc_ptr = dma_alloc_coherent(dev->hw->dev,
+ dev->tx_buf_ring.size * desc_size,
+ &desc_phys, GFP_KERNEL);
+ if (!desc_ptr)
return -ENOMEM;
- dev->tx_buf_ring.desc = desc;
+ dev->tx_buf_ring.desc = desc_ptr;
dev->tx_buf_ring.desc_phys = desc_phys;
- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
dma_addr_t page_phys, buf_phys;
struct page *page;
void *buf;
@@ -343,7 +681,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
return -ENOMEM;
}
- page_list[page_idx++] = page;
+ page_list[page_idx].p = page;
+ page_list[page_idx++].phy_addr = page_phys;
dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -351,28 +690,31 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
buf_phys = page_phys;
for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
- u32 txd_size;
- u32 ctrl;
-
- txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
+ struct mtk_wdma_desc *desc = desc_ptr;
desc->buf0 = cpu_to_le32(buf_phys);
- desc->buf1 = cpu_to_le32(buf_phys + txd_size);
-
- if (dev->hw->version == 1)
- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
- MTK_WED_BUF_SIZE - txd_size) |
- MTK_WDMA_DESC_CTRL_LAST_SEG1;
- else
- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
- MTK_WED_BUF_SIZE - txd_size) |
- MTK_WDMA_DESC_CTRL_LAST_SEG0;
- desc->ctrl = cpu_to_le32(ctrl);
- desc->info = 0;
- desc++;
-
+ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
+ u32 txd_size, ctrl;
+
+ txd_size = dev->wlan.init_buf(buf, buf_phys,
+ token++);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size);
+ if (mtk_wed_is_v1(dev->hw))
+ ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size);
+ else
+ ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+ MTK_WED_BUF_SIZE - txd_size);
+ desc->ctrl = cpu_to_le32(ctrl);
+ desc->info = 0;
+ } else {
+ desc->ctrl = cpu_to_le32(token << 16);
+ }
+
+ desc_ptr += desc_size;
buf += MTK_WED_BUF_SIZE;
buf_phys += MTK_WED_BUF_SIZE;
}
@@ -387,42 +729,103 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
static void
mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
- struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
- void **page_list = dev->tx_buf_ring.pages;
- int page_idx;
- int i;
+ struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
+ struct mtk_wed_hw *hw = dev->hw;
+ int i, page_idx = 0;
if (!page_list)
return;
- if (!desc)
+ if (!dev->tx_buf_ring.desc)
goto free_pagelist;
- for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
- i += MTK_WED_BUF_PER_PAGE) {
- void *page = page_list[page_idx++];
- dma_addr_t buf_addr;
+ for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
+ dma_addr_t page_phy = page_list[page_idx].phy_addr;
+ void *page = page_list[page_idx++].p;
if (!page)
break;
- buf_addr = le32_to_cpu(desc[i].buf0);
- dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(page);
}
- dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
- desc, dev->tx_buf_ring.desc_phys);
+ dma_free_coherent(dev->hw->dev,
+ dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size,
+ dev->tx_buf_ring.desc,
+ dev->tx_buf_ring.desc_phys);
free_pagelist:
kfree(page_list);
}
static int
+mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
+{
+ int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
+ struct mtk_wed_buf *page_list;
+ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ int i, page_idx = 0;
+
+ if (!dev->wlan.hw_rro)
+ return 0;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+ dev->hw_rro.pages = page_list;
+ desc = dma_alloc_coherent(dev->hw->dev,
+ dev->wlan.rx_nbuf * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->hw_rro.desc = desc;
+ dev->hw_rro.desc_phys = desc_phys;
+
+ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ int s;
+
+ page = __dev_alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ page_list[page_idx].p = page;
+ page_list[page_idx++].phy_addr = page_phys;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ buf_phys = page_phys;
+ for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
+ desc->buf0 = cpu_to_le32(buf_phys);
+ buf_phys += MTK_WED_PAGE_BUF_SIZE;
+ desc++;
+ }
+
+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
+
+static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
- struct mtk_rxbm_desc *desc;
+ struct mtk_wed_bm_desc *desc;
dma_addr_t desc_phys;
dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
@@ -436,13 +839,48 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
dev->rx_buf_ring.desc_phys = desc_phys;
dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
- return 0;
+ return mtk_wed_hwrro_buffer_alloc(dev);
+}
+
+static void
+mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_buf *page_list = dev->hw_rro.pages;
+ struct mtk_wed_bm_desc *desc = dev->hw_rro.desc;
+ int i, page_idx = 0;
+
+ if (!dev->wlan.hw_rro)
+ return;
+
+ if (!page_list)
+ return;
+
+ if (!desc)
+ goto free_pagelist;
+
+ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
+ dma_addr_t buf_addr = page_list[page_idx].phy_addr;
+ void *page = page_list[page_idx++].p;
+
+ if (!page)
+ break;
+
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+ dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc),
+ desc, dev->hw_rro.desc_phys);
+
+free_pagelist:
+ kfree(page_list);
}
static void
mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
- struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
+ struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc;
if (!desc)
return;
@@ -450,6 +888,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
dev->wlan.release_rx_buf(dev);
dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
desc, dev->rx_buf_ring.desc_phys);
+
+ mtk_wed_hwrro_free_buffer(dev);
+}
+
+static void
+mtk_wed_hwrro_init(struct mtk_wed_device *dev)
+{
+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+ return;
+
+ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
+ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
+
+ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys);
+
+ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
+ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
+ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
+ MTK_WED_RX_PG_BM_CNT));
+
+ /* enable rx_page_bm to fetch dmad */
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
}
static void
@@ -463,6 +923,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+
+ mtk_wed_hwrro_init(dev);
}
static void
@@ -498,13 +960,23 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
- if (dev->hw->version == 1)
+ switch (dev->hw->version) {
+ case 1:
mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
- else
+ break;
+ case 2:
mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+ break;
+ case 3:
+ mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ break;
+ default:
+ break;
+ }
if (!dev->hw->num_flows)
mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
@@ -516,6 +988,9 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
+ if (!mtk_wed_is_v2(dev->hw))
+ return;
+
if (enable) {
wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
wed_w32(dev, MTK_WED_TXP_DW1,
@@ -527,22 +1002,15 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
}
}
-#define MTK_WFMDA_RX_DMA_EN BIT(2)
-static void
-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
+static int
+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
+ struct mtk_wed_ring *ring)
{
- u32 val;
int i;
- if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
- return; /* queue is not configured by mt76 */
-
for (i = 0; i < 3; i++) {
- u32 cur_idx;
+ u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
- cur_idx = wed_r32(dev,
- MTK_WED_WPDMA_RING_RX_DATA(idx) +
- MTK_WED_RING_OFS_CPU_IDX);
if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
break;
@@ -551,12 +1019,10 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
if (i == 3) {
dev_err(dev->hw->dev, "rx dma enable failed\n");
- return;
+ return -ETIMEDOUT;
}
- val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
- MTK_WFMDA_RX_DMA_EN;
- wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
+ return 0;
}
static void
@@ -577,7 +1043,7 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v1(dev->hw)) {
regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
wdma_clr(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
@@ -590,6 +1056,14 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
MTK_WED_WPDMA_RX_D_RX_DRV_EN);
wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
+ if (mtk_wed_is_v3_or_greater(dev->hw) &&
+ mtk_wed_get_rx_capa(dev)) {
+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
+ MTK_WDMA_PREF_TX_CFG_PREF_EN);
+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
+ MTK_WDMA_PREF_RX_CFG_PREF_EN);
+ }
}
mtk_wed_set_512_support(dev, false);
@@ -606,7 +1080,7 @@ mtk_wed_stop(struct mtk_wed_device *dev)
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
- if (dev->hw->version == 1)
+ if (!mtk_wed_get_rx_capa(dev))
return;
wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
@@ -625,13 +1099,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev)
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- if (dev->hw->version == 1)
+ if (mtk_wed_is_v1(dev->hw))
return;
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_RX_ROUTE_QM_EN |
MTK_WED_CTRL_WED_RX_BM_EN |
MTK_WED_CTRL_RX_RRO_QM_EN);
+
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
+ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU);
+ wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
+ }
}
static void
@@ -643,6 +1125,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev)
mtk_wdma_rx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_amsdu_free_buffer(dev);
mtk_wed_free_tx_buffer(dev);
mtk_wed_free_tx_rings(dev);
@@ -681,21 +1164,37 @@ mtk_wed_detach(struct mtk_wed_device *dev)
mutex_unlock(&hw_lock);
}
-#define PCIE_BASE_ADDR0 0x11280000
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
switch (dev->wlan.bus_type) {
case MTK_WED_BUS_PCIE: {
struct device_node *np = dev->hw->eth->dev->of_node;
- struct regmap *regs;
- regs = syscon_regmap_lookup_by_phandle(np,
- "mediatek,wed-pcie");
- if (IS_ERR(regs))
- break;
+ if (mtk_wed_is_v2(dev->hw)) {
+ struct regmap *regs;
+
+ regs = syscon_regmap_lookup_by_phandle(np,
+ "mediatek,wed-pcie");
+ if (IS_ERR(regs))
+ break;
- regmap_update_bits(regs, 0, BIT(0), BIT(0));
+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
+ }
+
+ if (dev->wlan.msi) {
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
+ dev->hw->pcie_base | 0xc08);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
+ dev->hw->pcie_base | 0xc04);
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
+ } else {
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
+ dev->hw->pcie_base | 0x180);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
+ dev->hw->pcie_base | 0x184);
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+ }
wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
@@ -703,19 +1202,9 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
/* pcie interrupt control: pola/source selection */
wed_set(dev, MTK_WED_PCIE_INT_CTRL,
MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
-
- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
-
- /* pcie interrupt status trigger register */
- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
-
- /* pola setting */
- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL,
+ dev->hw->index));
break;
}
case MTK_WED_BUS_AXI:
@@ -731,38 +1220,55 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
- if (dev->hw->version == 1) {
- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
- } else {
- mtk_wed_bus_init(dev);
+ int i;
- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
- wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
- wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
- wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ return;
}
+
+ mtk_wed_bus_init(dev);
+
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+
+ if (!mtk_wed_get_rx_capa(dev))
+ return;
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+
+ if (!dev->wlan.hw_rro)
+ return;
+
+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
+ dev->wlan.wpdma_rx_pg + i * 0x10);
}
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
- u32 mask, set;
+ u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
+ u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
mtk_wed_deinit(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
mtk_wed_set_wpdma(dev);
- mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
- MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
- MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
- set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
- MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
- MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
+ mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+ set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ }
wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v1(dev->hw)) {
u32 offset = dev->hw->index ? 0x04000400 : 0;
wdma_set(dev, MTK_WDMA_GLO_CFG,
@@ -907,11 +1413,18 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
}
/* configure RX_ROUTE_QM */
- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
- FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ if (mtk_wed_is_v2(dev->hw)) {
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT,
+ 0x3 + dev->hw->index));
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ } else {
+ wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
+ FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT,
+ 0x3 + dev->hw->index));
+ }
/* enable RX_ROUTE_QM */
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}
@@ -924,34 +1437,30 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
dev->init_done = true;
mtk_wed_set_ext_int(dev, false);
- wed_w32(dev, MTK_WED_TX_BM_CTRL,
- MTK_WED_TX_BM_CTRL_PAUSE |
- FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
- dev->tx_buf_ring.size / 128) |
- FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
- MTK_WED_TX_RING_SIZE / 256));
wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
-
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
- if (dev->hw->version == 1) {
- wed_w32(dev, MTK_WED_TX_BM_TKID,
- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
- dev->wlan.token_start) |
- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
- dev->wlan.token_start +
- dev->wlan.nbuf - 1));
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+ MTK_WED_TX_RING_SIZE / 256));
wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
MTK_WED_TX_BM_DYN_THR_HI);
- } else {
- wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
- dev->wlan.token_start) |
- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
- dev->wlan.token_start +
- dev->wlan.nbuf - 1));
+ } else if (mtk_wed_is_v2(dev->hw)) {
+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+ MTK_WED_TX_RING_SIZE / 256));
+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+ MTK_WED_TX_TKID_DYN_THR_HI);
wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
MTK_WED_TX_BM_DYN_THR_HI_V2);
@@ -961,31 +1470,71 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
dev->tx_buf_ring.size / 128));
- wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
- FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
- MTK_WED_TX_TKID_DYN_THR_HI);
}
+ wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start + dev->wlan.nbuf - 1));
+
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ /* switch to new bm architecture */
+ wed_clr(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_LEGACY_EN);
+
+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3,
+ dev->wlan.nbuf / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3,
+ dev->wlan.nbuf / 128));
+ /* return SKBID + SDP back to bm */
+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
+
+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR,
+ MTK_WED_TX_BM_PKT_CNT |
+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
+ }
+
+ if (mtk_wed_is_v1(dev->hw)) {
wed_set(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- } else {
- wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+ } else if (mtk_wed_get_rx_capa(dev)) {
/* rx hw init */
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+ /* reset prefetch index of ring */
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+
+ /* reset prefetch FIFO of ring */
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
+
mtk_wed_rx_buffer_hw_init(dev);
mtk_wed_rro_hw_init(dev);
mtk_wed_route_qm_hw_init(dev);
}
wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+ if (!mtk_wed_is_v1(dev->hw))
+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
}
static void
@@ -1008,23 +1557,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
}
}
-static u32
-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
-{
- return !!(wed_r32(dev, reg) & mask);
-}
-
-static int
-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
-{
- int sleep = 15000;
- int timeout = 100 * sleep;
- u32 val;
-
- return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
- timeout, false, dev, reg, mask);
-}
-
static int
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
@@ -1038,13 +1570,33 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
if (ret)
return ret;
+ if (dev->wlan.hw_rro) {
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS,
+ MTK_WED_RX_IND_CMD_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG);
+ }
+
wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
+ if (!ret && mtk_wed_is_v3_or_greater(dev->hw))
+ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_BUSY);
if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
} else {
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ /* 1.a. disable prefetch HW */
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_BUSY);
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL);
+ }
+
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
@@ -1072,23 +1624,52 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
}
+ if (dev->wlan.hw_rro) {
+ /* disable rro msdu page drv */
+ wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_EN);
+
+ /* disable rro data drv */
+ wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
+
+ /* rro msdu page drv reset */
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_CLR);
+
+ /* rro data drv reset */
+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2),
+ MTK_WED_RRO_RX_D_DRV_CLR);
+ mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2),
+ MTK_WED_RRO_RX_D_DRV_CLR);
+ }
+
/* reset route qm */
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
- if (ret)
+ if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
- else
- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
- MTK_WED_RTQM_Q_RST);
+ } else if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_set(dev, MTK_WED_RTQM_RST, BIT(0));
+ wed_clr(dev, MTK_WED_RTQM_RST, BIT(0));
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+ } else {
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ }
/* reset tx wdma */
mtk_wdma_tx_reset(dev);
/* reset tx wdma drv */
wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
- mtk_wed_poll_busy(dev, MTK_WED_CTRL,
- MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS,
+ MTK_WED_WPDMA_STATUS_TX_DRV);
+ else
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
/* reset wed rx dma */
@@ -1098,13 +1679,8 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
} else {
- struct mtk_eth *eth = dev->hw->eth;
-
- if (mtk_is_netsys_v2_or_greater(eth))
- wed_set(dev, MTK_WED_RESET_IDX,
- MTK_WED_RESET_IDX_RX_V2);
- else
- wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
+ wed_set(dev, MTK_WED_RESET_IDX,
+ dev->hw->soc->regmap.reset_idx_rx_mask);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
@@ -1114,6 +1690,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
MTK_WED_CTRL_WED_RX_BM_BUSY);
mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+ if (dev->wlan.hw_rro) {
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_RX_PG_BM_BUSY);
+ wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
+ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
+ }
+
/* wo change to enable state */
val = MTK_WED_WO_STATE_ENABLE;
ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
@@ -1131,6 +1715,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
false);
}
mtk_wed_free_rx_buffer(dev);
+ mtk_wed_hwrro_free_buffer(dev);
return 0;
}
@@ -1157,21 +1742,48 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
} else {
- wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
+ wed_w32(dev, MTK_WED_RESET_IDX,
+ dev->hw->soc->regmap.reset_idx_tx_mask);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
/* 2. reset WDMA rx DMA */
busy = !!mtk_wdma_rx_reset(dev);
- wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE |
+ wed_r32(dev, MTK_WED_WDMA_GLO_CFG);
+ val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN;
+ wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val);
+ } else {
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ }
+
if (!busy)
busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
+ if (!busy && mtk_wed_is_v3_or_greater(dev->hw))
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_BUSY);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
} else {
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ /* 1.a. disable prefetch HW */
+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_BUSY);
+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
+
+ /* 2. Reset dma index */
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+ MTK_WED_WDMA_RESET_IDX_RX_ALL);
+ }
+
wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
@@ -1187,8 +1799,13 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
for (i = 0; i < 100; i++) {
- val = wed_r32(dev, MTK_WED_TX_BM_INTF);
- if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
+ if (mtk_wed_is_v1(dev->hw))
+ val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP,
+ wed_r32(dev, MTK_WED_TX_BM_INTF));
+ else
+ val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP,
+ wed_r32(dev, MTK_WED_TX_TKID_INTF));
+ if (val == 0x40)
break;
}
@@ -1210,6 +1827,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ wed_w32(dev, MTK_WED_RX1_CTRL2, 0);
} else {
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
MTK_WED_WPDMA_RESET_IDX_TX |
@@ -1218,7 +1837,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
}
dev->init_done = false;
- if (dev->hw->version == 1)
+ if (mtk_wed_is_v1(dev->hw))
return;
if (!busy) {
@@ -1226,7 +1845,14 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
- mtk_wed_rx_reset(dev);
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ /* reset amsdu engine */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU);
+ }
+
+ if (mtk_wed_get_rx_capa(dev))
+ mtk_wed_rx_reset(dev);
}
static int
@@ -1249,7 +1875,6 @@ static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
bool reset)
{
- u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
if (idx >= ARRAY_SIZE(dev->rx_wdma))
@@ -1257,7 +1882,7 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
wdma = &dev->rx_wdma[idx];
if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
- desc_size, true))
+ dev->hw->soc->wdma_desc_size, true))
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
@@ -1278,7 +1903,6 @@ static int
mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
bool reset)
{
- u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
if (idx >= ARRAY_SIZE(dev->tx_wdma))
@@ -1286,9 +1910,27 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
wdma = &dev->tx_wdma[idx];
if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
- desc_size, true))
+ dev->hw->soc->wdma_desc_size, true))
return -ENOMEM;
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ struct mtk_wdma_desc *desc = wdma->desc;
+ int i;
+
+ for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
+ desc->buf0 = 0;
+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ desc->buf1 = 0;
+ desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE);
+ desc++;
+ desc->buf0 = 0;
+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ desc->buf1 = 0;
+ desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE);
+ desc++;
+ }
+ }
+
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
@@ -1344,7 +1986,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v1(dev->hw)) {
wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
MTK_WED_PCIE_INT_TRIGGER_STATUS);
@@ -1354,8 +1996,9 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
} else {
- wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
- GENMASK(1, 0));
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
+
/* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
@@ -1374,15 +2017,20 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
dev->wlan.txfree_tbit));
- wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
- MTK_WED_WPDMA_INT_CTRL_RX0_EN |
- MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
- MTK_WED_WPDMA_INT_CTRL_RX1_EN |
- MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
- dev->wlan.rx_tbit[0]) |
- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
- dev->wlan.rx_tbit[1]));
+ if (mtk_wed_get_rx_capa(dev)) {
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
+ dev->wlan.rx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
+ dev->wlan.rx_tbit[1]));
+
+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
+ GENMASK(1, 0));
+ }
wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
wed_set(dev, MTK_WED_WDMA_INT_CTRL,
@@ -1398,55 +2046,280 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}
+#define MTK_WFMDA_RX_DMA_EN BIT(2)
static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
- wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+ int i;
+
+ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+ wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+ } else {
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
+ }
wed_set(dev, MTK_WED_GLO_CFG,
MTK_WED_GLO_CFG_TX_DMA_EN |
MTK_WED_GLO_CFG_RX_DMA_EN);
- wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
- MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
- wdma_set(dev, MTK_WDMA_GLO_CFG,
- MTK_WDMA_GLO_CFG_TX_DMA_EN |
- MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
-
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v1(dev->hw)) {
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
- } else {
- int i;
+ return;
+ }
- wed_set(dev, MTK_WED_WPDMA_CTRL,
- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
- wed_set(dev, MTK_WED_WDMA_GLO_CFG,
- MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
- MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
- MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
- MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
- wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
- MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
- MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+ wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+ }
- wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
- MTK_WED_WPDMA_RX_D_RX_DRV_EN |
- FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
- FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
- 0x2));
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+
+ if (!mtk_wed_get_rx_capa(dev))
+ return;
+
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2));
+
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
+
+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
+ wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
+ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+ }
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
+ struct mtk_wed_ring *ring = &dev->rx_ring[i];
+ u32 val;
+
+ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue; /* queue is not configured by mt76 */
+
+ if (mtk_wed_check_wfdma_rx_fill(dev, ring)) {
+ dev_err(dev->hw->dev,
+ "rx_ring(%d) dma enable failed\n", i);
+ continue;
+ }
+
+ val = wifi_r32(dev,
+ dev->wlan.wpdma_rx_glo -
+ dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN;
+ wifi_w32(dev,
+ dev->wlan.wpdma_rx_glo - dev->wlan.phy_base,
+ val);
+ }
+}
+
+static void
+mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
+{
+ int i;
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+
+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+ return;
+
+ if (reset) {
+ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_EN);
+ return;
+ }
+
+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_CLR);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
+ dev->wlan.rro_rx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
+ dev->wlan.rro_rx_tbit[1]));
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[1]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[2]));
+
+ /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after WM
+ * FWDL has completed; otherwise the RRO_MSDU_PG ring may be broken.
+ */
+ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_EN);
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];
+
+ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue;
+
+ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
+ dev_err(dev->hw->dev,
+ "rx_rro_ring(%d) initialization failed\n", i);
+ }
+
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
+ struct mtk_wed_ring *ring = &dev->rx_page_ring[i];
+
+ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue;
+
+ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
+ dev_err(dev->hw->dev,
+ "rx_page_ring(%d) initialization failed\n", i);
+ }
+}
+
+static void
+mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx,
+ void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
+
+ ring->wpdma = regs;
+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
+ readl(regs));
+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+}
+
+static void
+mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx,
+ void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
+
+ ring->wpdma = regs;
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
+ readl(regs));
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+}
+
+static int
+mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
+ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
+ int i, count = 0;
- for (i = 0; i < MTK_WED_RX_QUEUES; i++)
- mtk_wed_check_wfdma_rx_fill(dev, i);
+ ring->wpdma = regs;
+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
+ readl(regs) & 0xfffffff0);
+
+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+
+ /* ACK SN control register */
+ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
+ dev->wlan.ind_cmd.ack_sn_addr);
+ wed_w32(dev, MTK_WED_RRO_CFG1,
+ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
+ dev->wlan.ind_cmd.win_size) |
+ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
+ dev->wlan.ind_cmd.particular_sid));
+
+ /* particular session addr element */
+ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0,
+ dev->wlan.ind_cmd.particular_se_phys);
+
+ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
+ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
+ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
+ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
+ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
+
+ count = 0;
+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100)
+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+ if (count >= 100)
+ dev_err(dev->hw->dev,
+ "write ba session base failed\n");
+ }
+
+ /* pn check init */
+ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
+ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
+ MTK_WED_PN_CHECK_IS_FIRST);
+
+ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
+ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
+
+ count = 0;
+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+ while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100)
+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+ if (count >= 100)
+ dev_err(dev->hw->dev,
+ "session(%d) initialization failed\n", i);
}
+
+ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
+
+ return 0;
}
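Both busy-wait loops in mtk_wed_ind_rx_ring_setup() follow the same write-then-poll shape: issue a table or PN-check write, then re-read the config register until the hardware raises its WR_RDY bit, giving up after a bounded number of reads. A minimal sketch of that pattern factored into a helper follows; the helper name, the -ETIMEDOUT return convention, and the 100-read budget are illustrative assumptions, not part of this patch:

static int wed_poll_wr_rdy(struct mtk_wed_device *dev, u32 reg, u32 rdy_bit)
{
	int count = 0;
	u32 val;

	/* bounded spin: re-read until the hardware raises the ready bit */
	val = wed_r32(dev, reg);
	while (!(val & rdy_bit) && count++ < 100)
		val = wed_r32(dev, reg);

	return count >= 100 ? -ETIMEDOUT : 0;
}

With such a helper, the MTK_WED_ADDR_ELEM_TBL_WR_RDY and MTK_WED_PN_CHECK_WR_RDY loops above would each reduce to a single call.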
static void
@@ -1466,14 +2339,14 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
mtk_wed_set_ext_int(dev, true);
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v1(dev->hw)) {
u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
dev->hw->index);
val |= BIT(0) | (BIT(1) * !!dev->hw->index);
regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
- } else {
+ } else if (mtk_wed_get_rx_capa(dev)) {
/* driver sets mid ready, and does so only once */
wed_w32(dev, MTK_WED_EXT_INT_MASK1,
MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
@@ -1483,12 +2356,18 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
wed_r32(dev, MTK_WED_EXT_INT_MASK1);
wed_r32(dev, MTK_WED_EXT_INT_MASK2);
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_w32(dev, MTK_WED_EXT_INT_MASK3,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK3);
+ }
+
if (mtk_wed_rro_cfg(dev))
return;
-
}
mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+ mtk_wed_amsdu_init(dev);
mtk_wed_dma_enable(dev);
dev->running = true;
@@ -1535,6 +2414,7 @@ mtk_wed_attach(struct mtk_wed_device *dev)
dev->irq = hw->irq;
dev->wdma_idx = hw->index;
dev->version = hw->version;
+ dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);
if (hw->eth->dma_dev == hw->eth->dev &&
of_dma_is_coherent(hw->eth->dev->of_node))
@@ -1544,6 +2424,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)
if (ret)
goto out;
+ ret = mtk_wed_amsdu_buffer_alloc(dev);
+ if (ret)
+ goto out;
+
if (mtk_wed_get_rx_capa(dev)) {
ret = mtk_wed_rro_alloc(dev);
if (ret)
@@ -1551,13 +2435,14 @@ mtk_wed_attach(struct mtk_wed_device *dev)
}
mtk_wed_hw_init_early(dev);
- if (hw->version == 1) {
+ if (mtk_wed_is_v1(hw))
regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
BIT(hw->index), 0);
- } else {
+ else
dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
+
+ if (mtk_wed_get_rx_capa(dev))
ret = mtk_wed_wo_init(hw);
- }
out:
if (ret) {
dev_err(dev->hw->dev, "failed to attach wed device\n");
@@ -1601,6 +2486,23 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
ring->reg_base = MTK_WED_RING_TX(idx);
ring->wpdma = regs;
+ if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) {
+ /* reset prefetch index */
+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
+
+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
+
+ /* reset prefetch FIFO */
+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
+ MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
+ MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
+ }
+
/* WED -> WPDMA */
wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
@@ -1619,7 +2521,7 @@ static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
struct mtk_wed_ring *ring = &dev->txfree_ring;
- int i, index = dev->hw->version == 1;
+ int i, index = mtk_wed_is_v1(dev->hw);
/*
* For txfree event handling, the same DMA ring is shared between WED
@@ -1675,15 +2577,13 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
- u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ u32 val, ext_mask;
- if (dev->hw->version == 1)
- ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
else
- ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
- MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
- MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+ ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
@@ -1713,19 +2613,20 @@ mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
int mtk_wed_flow_add(int index)
{
struct mtk_wed_hw *hw = hw_list[index];
- int ret;
+ int ret = 0;
- if (!hw || !hw->wed_dev)
- return -ENODEV;
+ mutex_lock(&hw_lock);
- if (hw->num_flows) {
- hw->num_flows++;
- return 0;
+ if (!hw || !hw->wed_dev) {
+ ret = -ENODEV;
+ goto out;
}
- mutex_lock(&hw_lock);
- if (!hw->wed_dev) {
- ret = -ENODEV;
+ if (!hw->wed_dev->wlan.offload_enable)
+ goto out;
+
+ if (hw->num_flows) {
+ hw->num_flows++;
goto out;
}
@@ -1744,14 +2645,15 @@ void mtk_wed_flow_remove(int index)
{
struct mtk_wed_hw *hw = hw_list[index];
- if (!hw)
- return;
+ mutex_lock(&hw_lock);
- if (--hw->num_flows)
- return;
+ if (!hw || !hw->wed_dev)
+ goto out;
- mutex_lock(&hw_lock);
- if (!hw->wed_dev)
+ if (!hw->wed_dev->wlan.offload_disable)
+ goto out;
+
+ if (--hw->num_flows)
goto out;
hw->wed_dev->wlan.offload_disable(hw->wed_dev);
@@ -1842,7 +2744,7 @@ mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
{
struct mtk_wed_hw *hw = wed->hw;
- if (hw->version < 2)
+ if (mtk_wed_is_v1(hw))
return -EOPNOTSUPP;
switch (type) {
@@ -1874,6 +2776,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
.detach = mtk_wed_detach,
.ppe_check = mtk_wed_ppe_check,
.setup_tc = mtk_wed_setup_tc,
+ .start_hw_rro = mtk_wed_start_hw_rro,
+ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
+ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
+ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
};
struct device_node *eth_np = eth->dev->of_node;
struct platform_device *pdev;
@@ -1916,9 +2822,17 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
- hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
+ hw->version = eth->soc->version;
- if (hw->version == 1) {
+ switch (hw->version) {
+ case 2:
+ hw->soc = &mt7986_data;
+ break;
+ case 3:
+ hw->soc = &mt7988_data;
+ break;
+ default:
+ case 1:
hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
"mediatek,pcie-mirror");
hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
@@ -1932,6 +2846,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
regmap_write(hw->mirror, 0, 0);
regmap_write(hw->mirror, 4, 0);
}
+ hw->soc = &mt7622_data;
+ break;
}
mtk_wed_hw_add_debugfs(hw);
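The version switch above resolves hw->version to a per-SoC data table (mt7622, mt7986 or mt7988) and keeps the v1-only syscon lookups in the default branch. Sketched below is a table-driven equivalent for the hw->soc assignment alone; the lookup function and array are hypothetical, mirror only the switch, and do not replace the v1 syscon handling:

static const struct mtk_wed_soc_data *mtk_wed_soc_lookup(u8 version)
{
	static const struct mtk_wed_soc_data * const soc_by_version[] = {
		[1] = &mt7622_data,
		[2] = &mt7986_data,
		[3] = &mt7988_data,
	};

	if (version >= ARRAY_SIZE(soc_by_version) || !soc_by_version[version])
		return &mt7622_data; /* mirrors the default: fall-through */

	return soc_by_version[version];
}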
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index 43ab77eaf683..c1f0479d7a71 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -9,10 +9,29 @@
#include <linux/regmap.h>
#include <linux/netdevice.h>
+#include "mtk_wed_regs.h"
+
struct mtk_eth;
struct mtk_wed_wo;
+struct mtk_wed_soc_data {
+ struct {
+ u32 tx_bm_tkid;
+ u32 wpdma_rx_ring0;
+ u32 reset_idx_tx_mask;
+ u32 reset_idx_rx_mask;
+ } regmap;
+ u32 tx_ring_desc_size;
+ u32 wdma_desc_size;
+};
+
+struct mtk_wed_amsdu {
+ void *txd;
+ dma_addr_t txd_phy;
+};
+
struct mtk_wed_hw {
+ const struct mtk_wed_soc_data *soc;
struct device_node *node;
struct mtk_eth *eth;
struct regmap *regs;
@@ -24,6 +43,8 @@ struct mtk_wed_hw {
struct dentry *debugfs_dir;
struct mtk_wed_device *wed_dev;
struct mtk_wed_wo *wed_wo;
+ struct mtk_wed_amsdu *wed_amsdu;
+ u32 pcie_base;
u32 debugfs_reg;
u32 num_flows;
u8 version;
@@ -37,9 +58,30 @@ struct mtk_wdma_info {
u8 queue;
u16 wcid;
u8 bss;
+ u8 amsdu;
};
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static inline bool mtk_wed_is_v1(struct mtk_wed_hw *hw)
+{
+ return hw->version == 1;
+}
+
+static inline bool mtk_wed_is_v2(struct mtk_wed_hw *hw)
+{
+ return hw->version == 2;
+}
+
+static inline bool mtk_wed_is_v3(struct mtk_wed_hw *hw)
+{
+ return hw->version == 3;
+}
+
+static inline bool mtk_wed_is_v3_or_greater(struct mtk_wed_hw *hw)
+{
+ return hw->version > 2;
+}
+
static inline void
wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
{
@@ -122,6 +164,21 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
writel(val, dev->txfree_ring.wpdma + reg);
}
+static inline u32 mtk_wed_get_pcie_base(struct mtk_wed_device *dev)
+{
+ if (!mtk_wed_is_v3_or_greater(dev->hw))
+ return MTK_WED_PCIE_BASE;
+
+ switch (dev->hw->index) {
+ case 1:
+ return MTK_WED_PCIE_BASE1;
+ case 2:
+ return MTK_WED_PCIE_BASE2;
+ default:
+ return MTK_WED_PCIE_BASE0;
+ }
+}
+
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
void __iomem *wdma, phys_addr_t wdma_phy,
int index);
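mtk_wed_get_pcie_base() gives v3 hardware a per-instance PCIe window (indices 1 and 2 get dedicated bases) while v1/v2 keep the single legacy base. A hedged sketch of how a caller would compose an absolute PCIe address from it; example_pcie_reg() is illustrative and not a driver function:

static u32 example_pcie_reg(struct mtk_wed_device *dev, u32 offset)
{
	/* rebase the register offset onto this instance's PCIe window */
	return mtk_wed_get_pcie_base(dev) + offset;
}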
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
index e24afeaea0da..781c691473e1 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -11,6 +11,7 @@ struct reg_dump {
u16 offset;
u8 type;
u8 base;
+ u32 mask;
};
enum {
@@ -25,6 +26,8 @@ enum {
#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
+#define DUMP_REG_MASK(_reg, _mask) \
+ { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
#define DUMP_RING(_prefix, _base, ...) \
{ _prefix " BASE", _base, __VA_ARGS__ }, \
{ _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
@@ -32,6 +35,7 @@ enum {
{ _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
+#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
@@ -151,7 +155,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
static int
wed_rxinfo_show(struct seq_file *s, void *data)
{
- static const struct reg_dump regs[] = {
+ static const struct reg_dump regs_common[] = {
DUMP_STR("WPDMA RX"),
DUMP_WPDMA_RX_RING(0),
DUMP_WPDMA_RX_RING(1),
@@ -169,7 +173,7 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED_RING(WED_RING_RX_DATA(0)),
DUMP_WED_RING(WED_RING_RX_DATA(1)),
- DUMP_STR("WED RRO"),
+ DUMP_STR("WED WO RRO"),
DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
DUMP_WED(WED_RROQM_MID_MIB),
DUMP_WED(WED_RROQM_MOD_MIB),
@@ -180,17 +184,6 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
- DUMP_STR("WED Route QM"),
- DUMP_WED(WED_RTQM_R2H_MIB(0)),
- DUMP_WED(WED_RTQM_R2Q_MIB(0)),
- DUMP_WED(WED_RTQM_Q2H_MIB(0)),
- DUMP_WED(WED_RTQM_R2H_MIB(1)),
- DUMP_WED(WED_RTQM_R2Q_MIB(1)),
- DUMP_WED(WED_RTQM_Q2H_MIB(1)),
- DUMP_WED(WED_RTQM_Q2N_MIB),
- DUMP_WED(WED_RTQM_Q2B_MIB),
- DUMP_WED(WED_RTQM_PFDBK_MIB),
-
DUMP_STR("WED WDMA TX"),
DUMP_WED(WED_WDMA_TX_MIB),
DUMP_WED_RING(WED_WDMA_RING_TX),
@@ -211,6 +204,287 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED(WED_RX_BM_INTF),
DUMP_WED(WED_RX_BM_ERR_STS),
};
+ static const struct reg_dump regs_wed_v2[] = {
+ DUMP_STR("WED Route QM"),
+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2N_MIB),
+ DUMP_WED(WED_RTQM_Q2B_MIB),
+ DUMP_WED(WED_RTQM_PFDBK_MIB),
+ };
+ static const struct reg_dump regs_wed_v3[] = {
+ DUMP_STR("WED RX RRO DATA"),
+ DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
+ DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
+
+ DUMP_STR("WED RX MSDU PAGE"),
+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
+
+ DUMP_STR("WED RX IND CMD"),
+ DUMP_WED(WED_IND_CMD_RX_CTRL1),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
+ WED_IND_CMD_PREFETCH_FREE_CNT),
+ DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
+
+ DUMP_STR("WED ADDR ELEM"),
+ DUMP_WED(WED_ADDR_ELEM_CFG0),
+ DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
+ WED_ADDR_ELEM_PREFETCH_FREE_CNT),
+
+ DUMP_STR("WED Route QM"),
+ DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
+ DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
+ DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
+ DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
+ DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
+ DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
+
+ DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
+ DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
+ DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
+ DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
+ DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
+ DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev) {
+ dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common));
+ if (mtk_wed_is_v2(hw))
+ dump_wed_regs(s, dev,
+ regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
+ else
+ dump_wed_regs(s, dev,
+ regs_wed_v3, ARRAY_SIZE(regs_wed_v3));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
+
+static int
+wed_amsdu_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WED AMDSU INFO"),
+ DUMP_WED(WED_MON_AMSDU_FIFO_DMAD),
+
+ DUMP_STR("WED AMDSU ENG0 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(0)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(0)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(0)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(0)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(0)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG1 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(1)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(1)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(1)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(1)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(1)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG2 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(2)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(2)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(2)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(2)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(2)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG3 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(3)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(3)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(3)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(3)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(3)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG4 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(4)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(4)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(4)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(4)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(4)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG5 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(5)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(5)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(5)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(5)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(5)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG6 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(6)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(6)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(6)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(6)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(6)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG7 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(7)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(7)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(7)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(7)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(7)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG8 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(8)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(8)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(8)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(8)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(8)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED QMEM INFO"),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_FQ_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_SP_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID0_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID1_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID2_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID3_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID4_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID5_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID6_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID7_QCNT),
+
+ DUMP_STR("WED QMEM HEAD INFO"),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_FQ_HEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_SP_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID0_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID1_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID2_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID3_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID4_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID5_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID6_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID7_QHEAD),
+
+ DUMP_STR("WED QMEM TAIL INFO"),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_FQ_TAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_SP_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID0_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID1_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID2_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID3_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID4_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID5_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID6_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID7_QTAIL),
+
+ DUMP_STR("WED HIFTXD MSDU INFO"),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(1)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(2)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(3)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(4)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(5)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(6)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(7)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(8)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(9)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(10)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(11)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(12)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(13)),
+ };
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
@@ -219,7 +493,94 @@ wed_rxinfo_show(struct seq_file *s, void *data)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
+DEFINE_SHOW_ATTRIBUTE(wed_amsdu);
+
+static int
+wed_rtqm_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"),
+ DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT),
+ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT),
+ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT),
+
+ DUMP_STR("WED Route QM IGRS1(Legacy)"),
+ DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT),
+ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT),
+ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT),
+
+ DUMP_STR("WED Route QM IGRS2(RRO3.0)"),
+ DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
+ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT),
+ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT),
+
+ DUMP_STR("WED Route QM IGRS3(DEBUG)"),
+ DUMP_WED(WED_RTQM_IGRS3_I2HW_DMAD_CNT),
+ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT),
+ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_rtqm);
+
+static int
+wed_rro_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("RRO/IND CMD CNT"),
+ DUMP_WED(WED_RX_IND_CMD_CNT(1)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(2)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(3)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(4)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(5)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(6)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(7)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(8)),
+ DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9),
+ WED_IND_CMD_MAGIC_CNT_FAIL_CNT),
+
+ DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)),
+ DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1),
+ WED_ADDR_ELEM_SIG_FAIL_CNT),
+ DUMP_WED(WED_RX_MSDU_PG_CNT(1)),
+ DUMP_WED(WED_RX_MSDU_PG_CNT(2)),
+ DUMP_WED(WED_RX_MSDU_PG_CNT(3)),
+ DUMP_WED(WED_RX_MSDU_PG_CNT(4)),
+ DUMP_WED(WED_RX_MSDU_PG_CNT(5)),
+ DUMP_WED_MASK(WED_RX_PN_CHK_CNT,
+ WED_PN_CHK_FAIL_CNT),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_rro);
static int
mtk_wed_reg_set(void *data, u64 val)
@@ -261,7 +622,16 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
- if (hw->version != 1)
+ if (!mtk_wed_is_v1(hw)) {
debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
&wed_rxinfo_fops);
+ if (mtk_wed_is_v3_or_greater(hw)) {
+ debugfs_create_file_unsafe("amsdu", 0400, dir, hw,
+ &wed_amsdu_fops);
+ debugfs_create_file_unsafe("rtqm", 0400, dir, hw,
+ &wed_rtqm_fops);
+ debugfs_create_file_unsafe("rro", 0400, dir, hw,
+ &wed_rro_fops);
+ }
+ }
}
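The new mask member on struct reg_dump lets DUMP_WED_MASK entries print a single field of a register rather than the whole word. A sketch of the read side, under the assumption that dump_wed_regs() shifts the masked field down before printing; reg_dump_value() is a hypothetical name, and __ffs() is used because FIELD_GET() requires a compile-time-constant mask:

static u32 reg_dump_value(struct mtk_wed_device *dev,
			  const struct reg_dump *reg)
{
	u32 val = wed_r32(dev, reg->offset);

	/* with a mask set, extract and right-align just that field */
	return reg->mask ? (val & reg->mask) >> __ffs(reg->mask) : val;
}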
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
index 071ed3dea860..65a78e274009 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -16,14 +16,30 @@
#include "mtk_wed_wo.h"
#include "mtk_wed.h"
+static struct mtk_wed_wo_memory_region mem_region[] = {
+ [MTK_WED_WO_REGION_EMI] = {
+ .name = "wo-emi",
+ },
+ [MTK_WED_WO_REGION_ILM] = {
+ .name = "wo-ilm",
+ },
+ [MTK_WED_WO_REGION_DATA] = {
+ .name = "wo-data",
+ .shared = true,
+ },
+ [MTK_WED_WO_REGION_BOOT] = {
+ .name = "wo-boot",
+ },
+};
+
static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
{
- return readl(wo->boot.addr + reg);
+ return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
- writel(val, wo->boot.addr + reg);
+ writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
static struct sk_buff *
@@ -68,6 +84,9 @@ mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb)
struct mtk_wed_wo_rx_stats *stats;
int i;
+ if (!wed->wlan.update_wo_rx_stats)
+ return;
+
if (count * sizeof(*stats) > skb->len - sizeof(u32))
return;
@@ -204,7 +223,7 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
- if (dev->hw->version == 1)
+ if (!mtk_wed_get_rx_capa(dev))
return 0;
if (WARN_ON(!wo))
@@ -215,19 +234,13 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
}
static int
-mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
+mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
struct mtk_wed_wo_memory_region *region)
{
struct reserved_mem *rmem;
struct device_node *np;
- int index;
-
- index = of_property_match_string(wo->hw->node, "memory-region-names",
- region->name);
- if (index < 0)
- return index;
- np = of_parse_phandle(wo->hw->node, "memory-region", index);
+ np = of_parse_phandle(hw->node, "memory-region", index);
if (!np)
return -ENODEV;
@@ -239,7 +252,7 @@ mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
region->phy_addr = rmem->base;
region->size = rmem->size;
- region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
+ region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);
return !region->addr ? -EINVAL : 0;
}
@@ -252,6 +265,9 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
const struct mtk_wed_fw_trailer *trailer;
const struct mtk_wed_fw_region *fw_region;
+ if (!region->phy_addr || !region->size)
+ return 0;
+
trailer_ptr = fw->data + fw->size - sizeof(*trailer);
trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
@@ -291,18 +307,6 @@ next:
static int
mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
{
- static struct mtk_wed_wo_memory_region mem_region[] = {
- [MTK_WED_WO_REGION_EMI] = {
- .name = "wo-emi",
- },
- [MTK_WED_WO_REGION_ILM] = {
- .name = "wo-ilm",
- },
- [MTK_WED_WO_REGION_DATA] = {
- .name = "wo-data",
- .shared = true,
- },
- };
const struct mtk_wed_fw_trailer *trailer;
const struct firmware *fw;
const char *fw_name;
@@ -311,25 +315,38 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
/* load firmware region metadata */
for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
- ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
+ int index = of_property_match_string(wo->hw->node,
+ "memory-region-names",
+ mem_region[i].name);
+ if (index < 0)
+ continue;
+
+ ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
if (ret)
return ret;
}
- wo->boot.name = "wo-boot";
- ret = mtk_wed_get_memory_region(wo, &wo->boot);
- if (ret)
- return ret;
-
/* set dummy cr */
wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
wo->hw->index + 1);
/* load firmware */
- if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
- fw_name = MT7981_FIRMWARE_WO;
- else
- fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
+ switch (wo->hw->version) {
+ case 2:
+ if (of_device_is_compatible(wo->hw->node,
+ "mediatek,mt7981-wed"))
+ fw_name = MT7981_FIRMWARE_WO;
+ else
+ fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1
+ : MT7986_FIRMWARE_WO0;
+ break;
+ case 3:
+ fw_name = wo->hw->index ? MT7988_FIRMWARE_WO1
+ : MT7988_FIRMWARE_WO0;
+ break;
+ default:
+ return -EINVAL;
+ }
ret = request_firmware(&fw, fw_name, wo->hw->dev);
if (ret)
@@ -350,15 +367,16 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
}
/* set the start address */
- boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
- : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
+ if (!mtk_wed_is_v3_or_greater(wo->hw) && wo->hw->index)
+ boot_cr = MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR;
+ else
+ boot_cr = MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
/* wo firmware reset */
wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
- val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
- val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
- : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
+ val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR) |
+ MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
out:
release_firmware(fw);
@@ -393,3 +411,5 @@ int mtk_wed_mcu_init(struct mtk_wed_wo *wo)
MODULE_FIRMWARE(MT7981_FIRMWARE_WO);
MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
+MODULE_FIRMWARE(MT7988_FIRMWARE_WO0);
+MODULE_FIRMWARE(MT7988_FIRMWARE_WO1);
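For each named region, the loop in mtk_wed_mcu_load_firmware() resolves the standard devicetree triple: find the name's index in memory-region-names, take the phandle at that index from memory-region, then map the reserved range. A condensed sketch of that flow for a single region; error handling is trimmed and the wo_map_region() name is an assumption for illustration:

static void __iomem *wo_map_region(struct mtk_wed_hw *hw, const char *name)
{
	int idx = of_property_match_string(hw->node, "memory-region-names",
					   name);
	struct device_node *np = of_parse_phandle(hw->node, "memory-region",
						  idx);
	struct reserved_mem *rmem = of_reserved_mem_lookup(np);

	of_node_put(np);
	return devm_ioremap(hw->dev, rmem->base, rmem->size);
}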
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index 47ea69feb3b2..c71190924816 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -13,6 +13,9 @@
#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
+#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29)
+#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31)
+
struct mtk_wdma_desc {
__le32 buf0;
__le32 ctrl;
@@ -25,6 +28,8 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET 0x008
#define MTK_WED_RESET_TX_BM BIT(0)
#define MTK_WED_RESET_RX_BM BIT(1)
+#define MTK_WED_RESET_RX_PG_BM BIT(2)
+#define MTK_WED_RESET_RRO_RX_TO_PG BIT(3)
#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
@@ -37,6 +42,7 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
#define MTK_WED_RESET_RX_RRO_QM BIT(20)
#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
+#define MTK_WED_RESET_TX_AMSDU BIT(22)
#define MTK_WED_RESET_WED BIT(31)
#define MTK_WED_CTRL 0x00c
@@ -44,6 +50,9 @@ struct mtk_wdma_desc {
#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
+#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5)
+#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6)
+#define MTK_WED_CTRL_WED_RX_PG_BM_BUSY BIT(7)
#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
@@ -54,9 +63,14 @@ struct mtk_wdma_desc {
#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
+#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20)
+#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21)
+#define MTK_WED_CTRL_TX_AMSDU_EN BIT(22)
+#define MTK_WED_CTRL_TX_AMSDU_BUSY BIT(23)
#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
+#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28)
#define MTK_WED_EXT_INT_STATUS 0x020
#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
@@ -64,8 +78,8 @@ struct mtk_wdma_desc {
#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
@@ -89,19 +103,26 @@ struct mtk_wdma_desc {
#define MTK_WED_EXT_INT_MASK 0x028
#define MTK_WED_EXT_INT_MASK1 0x02c
#define MTK_WED_EXT_INT_MASK2 0x030
+#define MTK_WED_EXT_INT_MASK3 0x034
#define MTK_WED_STATUS 0x060
#define MTK_WED_STATUS_TX GENMASK(15, 8)
+#define MTK_WED_WPDMA_STATUS 0x068
+#define MTK_WED_WPDMA_STATUS_TX_DRV GENMASK(15, 8)
+
#define MTK_WED_TX_BM_CTRL 0x080
#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26)
+#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27)
#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
#define MTK_WED_TX_BM_BASE 0x084
+#define MTK_WED_TX_BM_INIT_PTR 0x088
+#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0)
+#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16)
-#define MTK_WED_TX_BM_TKID 0x088
-#define MTK_WED_TX_BM_TKID_V2 0x0c8
#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
@@ -124,6 +145,12 @@ struct mtk_wdma_desc {
#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+#define MTK_WED_TX_TKID_INTF 0x0dc
+#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP GENMASK(25, 16)
+
+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0)
+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16)
+
#define MTK_WED_TX_TKID_DYN_THR 0x0e0
#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
@@ -160,9 +187,6 @@ struct mtk_wdma_desc {
#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
#define MTK_WED_RESET_IDX 0x20c
-#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
-#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
-#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
@@ -174,6 +198,7 @@ struct mtk_wdma_desc {
#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
#define MTK_WED_SCR0 0x3c0
+#define MTK_WED_RX1_CTRL2 0x418
#define MTK_WED_WPDMA_INT_TRIGGER 0x504
#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
@@ -204,12 +229,15 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20)
#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25)
#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30)
#define MTK_WED_WPDMA_RESET_IDX 0x50c
#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
@@ -255,9 +283,10 @@ struct mtk_wdma_desc {
#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
#define MTK_WED_PCIE_INT_CTRL 0x57c
-#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
-#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
+#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21)
#define MTK_WED_WPDMA_CFG_BASE 0x580
#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
@@ -283,15 +312,30 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL BIT(20)
#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
-#define MTK_WED_WPDMA_RX_RING 0x770
#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
+#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
+#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
+#define MTK_WED_WPDMA_RX_D_PREF_BUSY BIT(1)
+#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
+#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
+
+#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8
+#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15)
+
+#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc
+
+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0
+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0)
+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16)
+
#define MTK_WED_WDMA_RING_TX 0x800
#define MTK_WED_WDMA_TX_MIB 0x810
@@ -299,6 +343,20 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
+#define MTK_WED_WDMA_RX_PREF_CFG 0x950
+#define MTK_WED_WDMA_RX_PREF_EN BIT(0)
+#define MTK_WED_WDMA_RX_PREF_BUSY BIT(1)
+#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
+#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
+#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
+#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
+#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
+#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY BIT(27)
+
+#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95c
+#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
+#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16)
+
#define MTK_WED_WDMA_GLO_CFG 0xa04
#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
@@ -322,6 +380,7 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_RESET_IDX 0xa08
#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_WDMA_RESET_IDX_RX_ALL BIT(20)
#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
#define MTK_WED_WDMA_INT_CLR 0xa24
@@ -331,6 +390,7 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
#define MTK_WED_WDMA_INT_CTRL 0xa2c
+#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0)
#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
#define MTK_WED_WDMA_CFG_BASE 0xaa0
@@ -391,9 +451,62 @@ struct mtk_wdma_desc {
#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG 0x238
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR BIT(0)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR BIT(4)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR BIT(8)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR BIT(12)
+
+#define MTK_WDMA_XDMA_RX_FIFO_CFG 0x23c
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR BIT(0)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR BIT(4)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR BIT(8)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR BIT(12)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR BIT(15)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR BIT(18)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR BIT(21)
+
#define MTK_WDMA_INT_GRP1 0x250
#define MTK_WDMA_INT_GRP2 0x254
+#define MTK_WDMA_PREF_TX_CFG 0x2d0
+#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
+#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY BIT(1)
+
+#define MTK_WDMA_PREF_RX_CFG 0x2dc
+#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
+#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY BIT(1)
+
+#define MTK_WDMA_PREF_RX_FIFO_CFG 0x2e0
+#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR BIT(0)
+#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR BIT(16)
+
+#define MTK_WDMA_PREF_TX_FIFO_CFG 0x2d4
+#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR BIT(0)
+#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR BIT(16)
+
+#define MTK_WDMA_PREF_SIDX_CFG 0x2e4
+#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
+#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
+
+#define MTK_WDMA_WRBK_TX_CFG 0x300
+#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY BIT(0)
+#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
+
+#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n) (0x304 + (_n) * 0x4)
+#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR BIT(0)
+
+#define MTK_WDMA_WRBK_RX_CFG 0x344
+#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY BIT(0)
+#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
+
+#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n) (0x348 + (_n) * 0x4)
+#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR BIT(0)
+
+#define MTK_WDMA_WRBK_SIDX_CFG 0x388
+#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
+#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
+
#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
@@ -407,6 +520,32 @@ struct mtk_wdma_desc {
#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+#define MTK_WED_RTQM_RST 0xb04
+
+#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
+#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
+#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34
+
+#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44
+#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50
+#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n) (0xb54 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c
+
+#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c
+#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78
+#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n) (0xb7c + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84
+
+#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94
+#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0
+#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n) (0xba4 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac
+
#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
#define MTK_WED_RTQM_Q2N_MIB 0xb80
@@ -415,6 +554,24 @@ struct mtk_wdma_desc {
#define MTK_WED_RTQM_Q2B_MIB 0xb8c
#define MTK_WED_RTQM_PFDBK_MIB 0xb90
+#define MTK_WED_RTQM_ENQ_CFG0 0xbb8
+#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12)
+
+#define MTK_WED_RTQM_FDROP_MIB 0xb84
+#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc
+#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0
+#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4
+#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8
+#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc
+#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0
+
+#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8
+#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc
+#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0
+#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4
+#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8
+#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec
+
#define MTK_WED_RROQM_GLO_CFG 0xc04
#define MTK_WED_RROQM_RST_IDX 0xc08
#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
@@ -464,7 +621,195 @@ struct mtk_wdma_desc {
#define MTK_WED_RX_BM_INTF 0xd9c
#define MTK_WED_RX_BM_ERR_STS 0xda8
+#define MTK_RRO_IND_CMD_SIGNATURE 0xe00
+#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0)
+#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28)
+
+#define MTK_WED_IND_CMD_RX_CTRL0 0xe04
+#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0)
+#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16)
+#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28)
+
+#define MTK_WED_IND_CMD_RX_CTRL1 0xe08
+#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c
+#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0)
+#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16)
+
+#define MTK_WED_RRO_CFG0 0xe10
+#define MTK_WED_RRO_CFG1 0xe14
+#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29)
+#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16)
+#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0)
+
+#define MTK_WED_ADDR_ELEM_CFG0 0xe18
+#define MTK_WED_ADDR_ELEM_CFG1 0xe1c
+#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16)
+
+#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20
+#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0)
+#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28)
+#define MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29)
+#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30)
+#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31)
+
+#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24
+#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28
+
+#define MTK_WED_PN_CHECK_CFG 0xe30
+#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0)
+#define MTK_WED_PN_CHECK_RD_RDY BIT(28)
+#define MTK_WED_PN_CHECK_WR_RDY BIT(29)
+#define MTK_WED_PN_CHECK_RD BIT(30)
+#define MTK_WED_PN_CHECK_WR BIT(31)
+
+#define MTK_WED_PN_CHECK_WDATA_M 0xe38
+#define MTK_WED_PN_CHECK_IS_FIRST BIT(17)
+
+#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8)
+
+#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58
+#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26)
+#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31)
+
+#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc)
+#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc)
+#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc)
+
+#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10)
+
+#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13)
+
+#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4)
+#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26)
+#define MTK_WED_RRO_RX_D_DRV_EN BIT(31)
+
+#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0
+#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0)
+
+#define MTK_WED_RRO_PG_BM_BASE 0xeb4
+#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8
+#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0)
+#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16)
+
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10)
+
+#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
+
+#define MTK_WED_RRO_RX_HW_STS 0xf00
+#define MTK_WED_RX_IND_CMD_BUSY GENMASK(31, 0)
+
+#define MTK_WED_RX_IND_CMD_CNT0 0xf20
+#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
+
+#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4)
+#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0)
+
+#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4)
+#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0)
+#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16)
+#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0)
+
+#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4)
+
+#define MTK_WED_RX_PN_CHK_CNT 0xf70
+#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0)
+
#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
#define MTK_WED_PCIE_INT_MASK 0x0
+#define MTK_WED_AMSDU_FIFO 0x1800
+#define MTK_WED_AMSDU_IS_PRIOR0_RING BIT(10)
+
+#define MTK_WED_AMSDU_STA_INFO 0x1810
+#define MTK_WED_AMSDU_STA_INFO_DO_INIT BIT(0)
+#define MTK_WED_AMSDU_STA_INFO_SET_INIT BIT(1)
+
+#define MTK_WED_AMSDU_STA_INFO_INIT 0x1814
+#define MTK_WED_AMSDU_STA_WTBL_HDRT_MODE BIT(0)
+#define MTK_WED_AMSDU_STA_RMVL BIT(1)
+#define MTK_WED_AMSDU_STA_MAX_AMSDU_LEN GENMASK(7, 2)
+#define MTK_WED_AMSDU_STA_MAX_AMSDU_NUM GENMASK(11, 8)
+
+#define MTK_WED_AMSDU_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4)
+
+#define MTK_WED_AMSDU_PSE 0x1910
+#define MTK_WED_AMSDU_PSE_RESET BIT(16)
+
+#define MTK_WED_AMSDU_HIFTXD_CFG 0x1968
+#define MTK_WED_AMSDU_HIFTXD_SRC GENMASK(16, 15)
+
+#define MTK_WED_MON_AMSDU_FIFO_DMAD 0x1a34
+
+#define MTK_WED_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50)
+
+#define MTK_WED_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50)
+#define MTK_WED_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0)
+#define MTK_WED_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16)
+
+#define MTK_WED_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50)
+#define MTK_WED_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0)
+#define MTK_WED_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16)
+#define MTK_WED_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24)
+
+#define MTK_WED_MON_AMSDU_QMEM_STS1 0x1e04
+
+#define MTK_WED_MON_AMSDU_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4)
+#define MTK_WED_AMSDU_QMEM_FQ_CNT GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_SP_QCNT GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID0_QCNT GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID1_QCNT GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID2_QCNT GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID3_QCNT GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID4_QCNT GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID5_QCNT GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID6_QCNT GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID7_QCNT GENMASK(11, 0)
+
+#define MTK_WED_MON_AMSDU_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4)
+#define MTK_WED_AMSDU_QMEM_FQ_HEAD GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_SP_QHEAD GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID0_QHEAD GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID1_QHEAD GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID2_QHEAD GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID3_QHEAD GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID4_QHEAD GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID5_QHEAD GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID6_QHEAD GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID7_QHEAD GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_FQ_TAIL GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_SP_QTAIL GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID0_QTAIL GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID1_QTAIL GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID2_QTAIL GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID3_QTAIL GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID4_QTAIL GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID5_QTAIL GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID6_QTAIL GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID7_QTAIL GENMASK(11, 0)
+
+#define MTK_WED_MON_AMSDU_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4)
+
+#define MTK_WED_PCIE_BASE 0x11280000
+#define MTK_WED_PCIE_BASE0 0x11300000
+#define MTK_WED_PCIE_BASE1 0x11310000
+#define MTK_WED_PCIE_BASE2 0x11290000
#endif
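As a worked example of how these GENMASK fields compose: mtk_wed_dma_enable() programs the WPDMA RX prefetcher by OR-ing FIELD_PREP() terms, each of which shifts a value to its field's low bit. A self-contained sketch of the exact value that write produces (the function wrapper is illustrative):

static u32 wpdma_rx_pref_cfg(void)
{
	/* BIT(0) | (0x10 << 8) | (0x8 << 16) == 0x00081001 */
	return MTK_WED_WPDMA_RX_D_PREF_EN |
	       FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
	       FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8);
}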
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
index 7a1a2a28f1ac..87a67fa3868d 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -91,6 +91,8 @@ enum mtk_wed_dummy_cr_idx {
#define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
+#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin"
+#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin"
#define MTK_WO_MCU_CFG_LS_BASE 0
#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
@@ -228,7 +230,6 @@ struct mtk_wed_wo_queue {
struct mtk_wed_wo {
struct mtk_wed_hw *hw;
- struct mtk_wed_wo_memory_region boot;
struct mtk_wed_wo_queue q_tx;
struct mtk_wed_wo_queue q_rx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index c4f4de82e29e..685335832a93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -189,3 +189,11 @@ config MLX5_SF_MANAGER
port is managed through devlink. A subfunction supports RDMA, netdevice
and vdpa device. It is similar to a SRIOV VF but it doesn't require
SRIOV support.
+
+config MLX5_DPLL
+ tristate "Mellanox 5th generation network adapters (ConnectX series) DPLL support"
+ depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ select DPLL
+	help
+	  DPLL (Digital Phase-Locked Loop) support in Mellanox Technologies
+	  ConnectX NICs, used to control the SyncE reference clock.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 7e94caca4888..c44870b175f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -128,3 +128,6 @@ mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_
# SF manager
#
mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o
+
+obj-$(CONFIG_MLX5_DPLL) += mlx5_dpll.o
+mlx5_dpll-y := dpll.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 7909f378dc93..1fc03480c2ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -206,6 +206,19 @@ static bool is_ib_enabled(struct mlx5_core_dev *dev)
return err ? false : val.vbool;
}
+static bool is_dpll_supported(struct mlx5_core_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MLX5_DPLL))
+ return false;
+
+ if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
+ mlx5_core_warn(dev, "Missing SyncE capability\n");
+ return false;
+ }
+
+ return true;
+}
+
enum {
MLX5_INTERFACE_PROTOCOL_ETH,
MLX5_INTERFACE_PROTOCOL_ETH_REP,
@@ -215,6 +228,8 @@ enum {
MLX5_INTERFACE_PROTOCOL_MPIB,
MLX5_INTERFACE_PROTOCOL_VNET,
+
+ MLX5_INTERFACE_PROTOCOL_DPLL,
};
static const struct mlx5_adev_device {
@@ -237,6 +252,8 @@ static const struct mlx5_adev_device {
.is_supported = &is_ib_rep_supported },
[MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
.is_supported = &is_mp_supported },
+ [MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll",
+ .is_supported = &is_dpll_supported },
};
int mlx5_adev_idx_alloc(void)
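
Editor's note: the hunk above extends mlx5's table-driven auxiliary-device registration, where each protocol entry pairs a name suffix with an is_supported() callback; the new DPLL entry is gated on the MCAM synce_registers capability. A hedged, userspace-only sketch of that pattern follows; all names here (fake_dev, adev_desc) are illustrative stand-ins, not mlx5 symbols.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins: fake_dev and adev_desc are not mlx5 types. */
struct fake_dev { bool has_synce; };

struct adev_desc {
	const char *suffix;
	bool (*is_supported)(struct fake_dev *dev);
};

/* Mirrors the shape of is_dpll_supported(): gate on a capability bit. */
static bool dpll_supported(struct fake_dev *dev)
{
	return dev->has_synce;
}

static const struct adev_desc descs[] = {
	{ .suffix = "dpll", .is_supported = dpll_supported },
};

int main(void)
{
	struct fake_dev dev = { .has_synce = true };
	size_t i;

	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		if (descs[i].is_supported(&dev))
			printf("would register auxiliary device \".%s\"\n",
			       descs[i].suffix);
	return 0;
}
```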
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index af8460bb257b..3e064234f6fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -138,7 +138,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct pci_dev *pdev = dev->pdev;
- bool sf_dev_allocated;
int ret = 0;
if (mlx5_dev_is_lightweight(dev)) {
@@ -148,16 +147,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
- sf_dev_allocated = mlx5_sf_dev_allocated(dev);
- if (sf_dev_allocated) {
- /* Reload results in deleting SF device which further results in
- * unregistering devlink instance while holding devlink_mutext.
- * Hence, do not support reload.
- */
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
- return -EOPNOTSUPP;
- }
-
if (mlx5_lag_is_active(dev)) {
NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
new file mode 100644
index 000000000000..2cd81bb32c66
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/dpll.h>
+#include <linux/mlx5/driver.h>
+
+/* This structure represents a reference to a DPLL device; one is
+ * created per mdev instance.
+ */
+struct mlx5_dpll {
+ struct dpll_device *dpll;
+ struct dpll_pin *dpll_pin;
+ struct mlx5_core_dev *mdev;
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ struct {
+ bool valid;
+ enum dpll_lock_status lock_status;
+ enum dpll_pin_state pin_state;
+ } last;
+ struct notifier_block mdev_nb;
+ struct net_device *tracking_netdev;
+};
+
+static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id)
+{
+ u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MSECQ, 0, 0);
+ if (err)
+ return err;
+ *clock_id = MLX5_GET64(msecq_reg, out, local_clock_identity);
+ return 0;
+}
+
+static int
+mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
+ enum mlx5_msees_admin_status *admin_status,
+ enum mlx5_msees_oper_status *oper_status,
+ bool *ho_acq)
+{
+ u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MSEES, 0, 0);
+ if (err)
+ return err;
+ if (admin_status)
+ *admin_status = MLX5_GET(msees_reg, out, admin_status);
+ *oper_status = MLX5_GET(msees_reg, out, oper_status);
+ if (ho_acq)
+ *ho_acq = MLX5_GET(msees_reg, out, ho_acq);
+ return 0;
+}
+
+static int
+mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev,
+ enum mlx5_msees_admin_status admin_status)
+{
+ u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
+
+ MLX5_SET(msees_reg, in, field_select,
+ MLX5_MSEES_FIELD_SELECT_ENABLE |
+ MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS);
+ MLX5_SET(msees_reg, in, admin_status, admin_status);
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MSEES, 0, 1);
+}
+
+static enum dpll_lock_status
+mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq)
+{
+ switch (oper_status) {
+ case MLX5_MSEES_OPER_STATUS_SELF_TRACK:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_OTHER_TRACK:
+ return ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
+ DPLL_LOCK_STATUS_LOCKED;
+ case MLX5_MSEES_OPER_STATUS_HOLDOVER:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
+ return DPLL_LOCK_STATUS_HOLDOVER;
+ default:
+ return DPLL_LOCK_STATUS_UNLOCKED;
+ }
+}
+
+static enum dpll_pin_state
+mlx5_dpll_pin_state_get(enum mlx5_msees_admin_status admin_status,
+ enum mlx5_msees_oper_status oper_status)
+{
+ return (admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
+ (oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
+ oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
+ DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED;
+}
+
+static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
+ void *priv,
+ enum dpll_lock_status *status,
+ struct netlink_ext_ack *extack)
+{
+ enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll *mdpll = priv;
+ bool ho_acq;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, NULL,
+ &oper_status, &ho_acq);
+ if (err)
+ return err;
+
+ *status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
+ return 0;
+}
+
+static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
+ void *priv, enum dpll_mode *mode,
+ struct netlink_ext_ack *extack)
+{
+ *mode = DPLL_MODE_MANUAL;
+ return 0;
+}
+
+static bool mlx5_dpll_device_mode_supported(const struct dpll_device *dpll,
+ void *priv,
+ enum dpll_mode mode,
+ struct netlink_ext_ack *extack)
+{
+ return mode == DPLL_MODE_MANUAL;
+}
+
+static const struct dpll_device_ops mlx5_dpll_device_ops = {
+ .lock_status_get = mlx5_dpll_device_lock_status_get,
+ .mode_get = mlx5_dpll_device_mode_get,
+ .mode_supported = mlx5_dpll_device_mode_supported,
+};
+
+static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ *direction = DPLL_PIN_DIRECTION_INPUT;
+ return 0;
+}
+
+static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ enum mlx5_msees_admin_status admin_status;
+ enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll *mdpll = pin_priv;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
+ &oper_status, NULL);
+ if (err)
+ return err;
+ *state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+ return 0;
+}
+
+static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_dpll *mdpll = pin_priv;
+
+ return mlx5_dpll_synce_status_set(mdpll->mdev,
+ state == DPLL_PIN_STATE_CONNECTED ?
+ MLX5_MSEES_ADMIN_STATUS_TRACK :
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
+}
+
+static const struct dpll_pin_ops mlx5_dpll_pins_ops = {
+ .direction_get = mlx5_dpll_pin_direction_get,
+ .state_on_dpll_get = mlx5_dpll_state_on_dpll_get,
+ .state_on_dpll_set = mlx5_dpll_state_on_dpll_set,
+};
+
+static const struct dpll_pin_properties mlx5_dpll_pin_properties = {
+ .type = DPLL_PIN_TYPE_SYNCE_ETH_PORT,
+ .capabilities = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE,
+};
+
+#define MLX5_DPLL_PERIODIC_WORK_INTERVAL 500 /* ms */
+
+static void mlx5_dpll_periodic_work_queue(struct mlx5_dpll *mdpll)
+{
+ queue_delayed_work(mdpll->wq, &mdpll->work,
+ msecs_to_jiffies(MLX5_DPLL_PERIODIC_WORK_INTERVAL));
+}
+
+static void mlx5_dpll_periodic_work(struct work_struct *work)
+{
+ struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll,
+ work.work);
+ enum mlx5_msees_admin_status admin_status;
+ enum mlx5_msees_oper_status oper_status;
+ enum dpll_lock_status lock_status;
+ enum dpll_pin_state pin_state;
+ bool ho_acq;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
+ &oper_status, &ho_acq);
+ if (err)
+ goto err_out;
+ lock_status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
+ pin_state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+
+ if (!mdpll->last.valid)
+ goto invalid_out;
+
+ if (mdpll->last.lock_status != lock_status)
+ dpll_device_change_ntf(mdpll->dpll);
+ if (mdpll->last.pin_state != pin_state)
+ dpll_pin_change_ntf(mdpll->dpll_pin);
+
+invalid_out:
+ mdpll->last.lock_status = lock_status;
+ mdpll->last.pin_state = pin_state;
+ mdpll->last.valid = true;
+err_out:
+ mlx5_dpll_periodic_work_queue(mdpll);
+}
+
+static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
+ struct net_device *netdev)
+{
+ if (mdpll->tracking_netdev)
+ return;
+ netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+ mdpll->tracking_netdev = netdev;
+}
+
+static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
+{
+ if (!mdpll->tracking_netdev)
+ return;
+ netdev_dpll_pin_clear(mdpll->tracking_netdev);
+ mdpll->tracking_netdev = NULL;
+}
+
+static int mlx5_dpll_mdev_notifier_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct mlx5_dpll *mdpll = container_of(nb, struct mlx5_dpll, mdev_nb);
+ struct net_device *netdev = data;
+
+ switch (event) {
+ case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
+ if (netdev)
+ mlx5_dpll_netdev_dpll_pin_set(mdpll, netdev);
+ else
+ mlx5_dpll_netdev_dpll_pin_clear(mdpll);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static void mlx5_dpll_mdev_netdev_track(struct mlx5_dpll *mdpll,
+ struct mlx5_core_dev *mdev)
+{
+ mdpll->mdev_nb.notifier_call = mlx5_dpll_mdev_notifier_event;
+ mlx5_blocking_notifier_register(mdev, &mdpll->mdev_nb);
+ mlx5_core_uplink_netdev_event_replay(mdev);
+}
+
+static void mlx5_dpll_mdev_netdev_untrack(struct mlx5_dpll *mdpll,
+ struct mlx5_core_dev *mdev)
+{
+ mlx5_blocking_notifier_unregister(mdev, &mdpll->mdev_nb);
+ mlx5_dpll_netdev_dpll_pin_clear(mdpll);
+}
+
+static int mlx5_dpll_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct mlx5_dpll *mdpll;
+ u64 clock_id;
+ int err;
+
+ err = mlx5_dpll_synce_status_set(mdev,
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
+ if (err)
+ return err;
+
+ err = mlx5_dpll_clock_id_get(mdev, &clock_id);
+ if (err)
+ return err;
+
+ mdpll = kzalloc(sizeof(*mdpll), GFP_KERNEL);
+ if (!mdpll)
+ return -ENOMEM;
+ mdpll->mdev = mdev;
+ auxiliary_set_drvdata(adev, mdpll);
+
+ /* Multiple mdev instances might share one DPLL device. */
+ mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
+ if (IS_ERR(mdpll->dpll)) {
+ err = PTR_ERR(mdpll->dpll);
+ goto err_free_mdpll;
+ }
+
+ err = dpll_device_register(mdpll->dpll, DPLL_TYPE_EEC,
+ &mlx5_dpll_device_ops, mdpll);
+ if (err)
+ goto err_put_dpll_device;
+
+ /* Multiple mdev instances might share one DPLL pin. */
+ mdpll->dpll_pin = dpll_pin_get(clock_id, mlx5_get_dev_index(mdev),
+ THIS_MODULE, &mlx5_dpll_pin_properties);
+ if (IS_ERR(mdpll->dpll_pin)) {
+ err = PTR_ERR(mdpll->dpll_pin);
+ goto err_unregister_dpll_device;
+ }
+
+ err = dpll_pin_register(mdpll->dpll, mdpll->dpll_pin,
+ &mlx5_dpll_pins_ops, mdpll);
+ if (err)
+ goto err_put_dpll_pin;
+
+ mdpll->wq = create_singlethread_workqueue("mlx5_dpll");
+ if (!mdpll->wq) {
+ err = -ENOMEM;
+ goto err_unregister_dpll_pin;
+ }
+
+ mlx5_dpll_mdev_netdev_track(mdpll, mdev);
+
+ INIT_DELAYED_WORK(&mdpll->work, &mlx5_dpll_periodic_work);
+ mlx5_dpll_periodic_work_queue(mdpll);
+
+ return 0;
+
+err_unregister_dpll_pin:
+ dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
+ &mlx5_dpll_pins_ops, mdpll);
+err_put_dpll_pin:
+ dpll_pin_put(mdpll->dpll_pin);
+err_unregister_dpll_device:
+ dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
+err_put_dpll_device:
+ dpll_device_put(mdpll->dpll);
+err_free_mdpll:
+ kfree(mdpll);
+ return err;
+}
+
+static void mlx5_dpll_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
+ struct mlx5_core_dev *mdev = mdpll->mdev;
+
+ cancel_delayed_work(&mdpll->work);
+ mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
+ destroy_workqueue(mdpll->wq);
+ dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
+ &mlx5_dpll_pins_ops, mdpll);
+ dpll_pin_put(mdpll->dpll_pin);
+ dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
+ dpll_device_put(mdpll->dpll);
+ kfree(mdpll);
+
+ mlx5_dpll_synce_status_set(mdev,
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
+}
+
+static int mlx5_dpll_suspend(struct auxiliary_device *adev, pm_message_t state)
+{
+ return 0;
+}
+
+static int mlx5_dpll_resume(struct auxiliary_device *adev)
+{
+ return 0;
+}
+
+static const struct auxiliary_device_id mlx5_dpll_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".dpll", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5_dpll_id_table);
+
+static struct auxiliary_driver mlx5_dpll_driver = {
+ .name = "dpll",
+ .probe = mlx5_dpll_probe,
+ .remove = mlx5_dpll_remove,
+ .suspend = mlx5_dpll_suspend,
+ .resume = mlx5_dpll_resume,
+ .id_table = mlx5_dpll_id_table,
+};
+
+static int __init mlx5_dpll_init(void)
+{
+ return auxiliary_driver_register(&mlx5_dpll_driver);
+}
+
+static void __exit mlx5_dpll_exit(void)
+{
+ auxiliary_driver_unregister(&mlx5_dpll_driver);
+}
+
+module_init(mlx5_dpll_init);
+module_exit(mlx5_dpll_exit);
+
+MODULE_AUTHOR("Jiri Pirko <jiri@nvidia.com>");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) DPLL driver");
+MODULE_LICENSE("Dual BSD/GPL");
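
Editor's note: the core of the driver above is the translation from the MSEES operational status to the generic DPLL lock status. A self-contained sketch of that mapping, using local stand-in enums rather than the mlx5 and <linux/dpll.h> uAPI constants:

```c
#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins for the MSEES oper status and DPLL lock status enums. */
enum oper { FREE_RUNNING, SELF_TRACK, OTHER_TRACK, HOLDOVER, FAIL_HOLDOVER };
enum lock { UNLOCKED, LOCKED, LOCKED_HO_ACQ, HOLDOVER_STATUS };

static enum lock lock_status(enum oper o, bool ho_acq)
{
	switch (o) {
	case SELF_TRACK:
	case OTHER_TRACK:
		/* Tracking a reference: locked, holdover possibly acquired. */
		return ho_acq ? LOCKED_HO_ACQ : LOCKED;
	case HOLDOVER:
	case FAIL_HOLDOVER:
		return HOLDOVER_STATUS;
	default:
		return UNLOCKED;
	}
}

int main(void)
{
	printf("%d\n", lock_status(SELF_TRACK, true));	  /* 2: LOCKED_HO_ACQ */
	printf("%d\n", lock_status(HOLDOVER, false));	  /* 3: HOLDOVER_STATUS */
	printf("%d\n", lock_status(FREE_RUNNING, false)); /* 0: UNLOCKED */
	return 0;
}
```

The driver polls this state every 500 ms and compares it against a cached copy, emitting dpll_device_change_ntf()/dpll_pin_change_ntf() only on an actual change, which keeps the netlink notification volume down.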
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index c6b6e290fd79..0b1ac6e5c890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -12,11 +12,19 @@ struct mlx5e_dev *mlx5e_create_devlink(struct device *dev,
{
struct mlx5e_dev *mlx5e_dev;
struct devlink *devlink;
+ int err;
devlink = devlink_alloc_ns(&mlx5e_devlink_ops, sizeof(*mlx5e_dev),
devlink_net(priv_to_devlink(mdev)), dev);
if (!devlink)
return ERR_PTR(-ENOMEM);
+
+ err = devl_nested_devlink_set(priv_to_devlink(mdev), devlink);
+ if (err) {
+ devlink_free(devlink);
+ return ERR_PTR(err);
+ }
+
devlink_register(devlink);
return devlink_priv(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 12f56d0db0af..cc3fcd24b36d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -893,7 +893,7 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
mlx5e_xmit_xdp_doorbell(xdpsq);
if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
- xdp_do_flush_map();
+ xdp_do_flush();
__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
index 7a01714b3780..a7ed87e9d842 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
@@ -78,6 +78,8 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
xa_for_each(&entry->ports, idx, port) {
dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[i].ft = port->mcast.ft;
+ if (port->vport_num == MLX5_VPORT_UPLINK)
+ dests[i].ft->flags |= MLX5_FLOW_TABLE_UPLINK_VPORT;
i++;
}
@@ -585,10 +587,6 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec)
return ERR_PTR(-ENOMEM);
- if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
- port->vport_num == MLX5_VPORT_UPLINK)
- rule_spec->flow_context.flow_source =
- MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -660,11 +658,6 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
if (!rule_spec)
return ERR_PTR(-ENOMEM);
- if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
- port->vport_num == MLX5_VPORT_UPLINK)
- rule_spec->flow_context.flow_source =
- MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
-
if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 1887a24ee414..d2ebe56c3977 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "eswitch.h"
+#include "lib/mlx5.h"
#include "esw/qos.h"
#include "en/port.h"
#define CREATE_TRACE_POINTS
@@ -701,10 +702,75 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
return err;
}
+static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
+{
+ struct ethtool_link_ksettings lksettings;
+ struct net_device *slave, *master;
+ u32 speed = SPEED_UNKNOWN;
+
+	/* The RTNL lock ensures stable references to the master and slave
+	 * netdevices while the master's port speed is queried.
+	 */
+ ASSERT_RTNL();
+
+ slave = mlx5_uplink_netdev_get(mdev);
+ if (!slave)
+ goto out;
+
+ master = netdev_master_upper_dev_get(slave);
+ if (master && !__ethtool_get_link_ksettings(master, &lksettings))
+ speed = lksettings.base.speed;
+
+out:
+ return speed;
+}
+
+static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max,
+ bool hold_rtnl_lock, struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!mlx5_lag_is_active(mdev))
+ goto skip_lag;
+
+ if (hold_rtnl_lock)
+ rtnl_lock();
+
+ *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev);
+
+ if (hold_rtnl_lock)
+ rtnl_unlock();
+
+ if (*link_speed_max != (u32)SPEED_UNKNOWN)
+ return 0;
+
+skip_lag:
+ err = mlx5_port_max_linkspeed(mdev, link_speed_max);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
+
+ return err;
+}
+
+static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
+ const char *name, u32 link_speed_max,
+ u64 value, struct netlink_ext_ack *extack)
+{
+ if (value > link_speed_max) {
+		pr_err("%s rate value %lluMbps exceeds link maximum speed %u.\n",
+		       name, value, link_speed_max);
+		NL_SET_ERR_MSG_MOD(extack, "TX rate value exceeds link maximum speed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
{
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
+ u32 link_speed_max;
u32 bitmask;
int err;
@@ -712,6 +778,17 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
if (IS_ERR(vport))
return PTR_ERR(vport);
+ if (rate_mbps) {
+ err = mlx5_esw_qos_max_link_speed_get(esw->dev, &link_speed_max, false, NULL);
+ if (err)
+ return err;
+
+ err = mlx5_esw_qos_link_speed_verify(esw->dev, "Police",
+ link_speed_max, rate_mbps, NULL);
+ if (err)
+ return err;
+ }
+
mutex_lock(&esw->state_lock);
if (!vport->qos.enabled) {
/* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
@@ -744,12 +821,6 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
u64 value;
int err;
- err = mlx5_port_max_linkspeed(mdev, &link_speed_max);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
- return err;
- }
-
value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
if (remainder) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
@@ -758,12 +829,13 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return -EINVAL;
}
- if (value > link_speed_max) {
- pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
- name, value, link_speed_max);
- NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
- return -EINVAL;
- }
+ err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack);
+ if (err)
+ return err;
+
+ err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack);
+ if (err)
+ return err;
*rate = value;
return 0;
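
Editor's note: the qos change above splits rate validation into two steps, converting the devlink Bps value into Mbps units and then checking it against the maximum link speed (taken LAG-wide when bonded). A hedged sketch of the shape of that check; the unit constant mirrors MLX5_LINKSPEED_UNIT, everything else is illustrative.

```c
#include <stdint.h>
#include <stdio.h>

#define LINKSPEED_UNIT 125000u	/* 1 Mbps expressed in bytes per second */

static int rate_to_mbps(uint64_t bps, uint32_t link_speed_max, uint64_t *mbps)
{
	uint64_t value = bps / LINKSPEED_UNIT;

	if (bps % LINKSPEED_UNIT)
		return -1;	/* not in whole-Mbps units */
	if (value > link_speed_max)
		return -2;	/* exceeds the link's maximum speed */
	*mbps = value;
	return 0;
}

int main(void)
{
	uint64_t mbps;

	/* 12500000000 Bps == 100000 Mbps, within a 100G link's maximum. */
	if (!rate_to_mbps(12500000000ull, 100000, &mbps))
		printf("accepted: %llu Mbps\n", (unsigned long long)mbps);
	return 0;
}
```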
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2fb2598b775e..1c220048ae9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -365,6 +365,8 @@ static const char *hsynd_str(u8 synd)
return "FFSER error";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR:
return "High temperature";
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
+ return "ICM fetch PCI data poisoned error";
default:
return "unrecognized error";
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 4bf15391525c..0857eebf4f07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -65,12 +65,12 @@ err_metadata:
return err;
}
-#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 2
+#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
int err;
+ int i;
if (ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
@@ -98,11 +98,11 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- if (!err)
- err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
- if (err)
- goto err_rescan_drivers;
+ for (i = 0; i < ldev->ports; i++) {
+ err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ if (err)
+ goto err_rescan_drivers;
+ }
return 0;
@@ -112,8 +112,8 @@ err_rescan_drivers:
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ for (i = 0; i < ldev->ports; i++)
+ mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
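
Editor's note: enable_mpesw() above drops the hardcoded two-device representor reload in favor of a loop over ldev->ports, which is what allows raising the supported port count to four; note the error path now also unwinds across every port. A trivial stand-alone sketch of that loop-plus-unwind shape, with illustrative names:

```c
#include <stdio.h>

#define PORTS 4	/* MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS after the change */

/* Stand-in for mlx5_eswitch_reload_reps(); fails on port 2 for demo. */
static int reload_reps(int port)
{
	return port == 2 ? -1 : 0;
}

int main(void)
{
	int i, err = 0;

	for (i = 0; i < PORTS; i++) {
		err = reload_reps(i);
		if (err)
			break;
	}
	if (err)	/* unwind: best-effort reload across all ports */
		for (i = 0; i < PORTS; i++)
			reload_reps(i);
	printf("err=%d\n", err);
	return 0;
}
```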
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 15561965d2af..d17c9c31b165 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1405,9 +1405,9 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
+ mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_devlink_traps_unregister(priv_to_devlink(dev));
mlx5_sf_dev_table_destroy(dev);
- mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_sriov_detach(dev);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 05e148db9889..0f9b280514b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -14,7 +14,6 @@
struct mlx5_sf_dev_table {
struct xarray devices;
- unsigned int max_sfs;
phys_addr_t base_address;
u64 sf_bar_length;
struct notifier_block nb;
@@ -110,12 +109,6 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
sf_dev->parent_mdev = dev;
sf_dev->fn_id = fn_id;
- if (!table->max_sfs) {
- mlx5_adev_idx_free(id);
- kfree(sf_dev);
- err = -EOPNOTSUPP;
- goto add_err;
- }
sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
trace_mlx5_sf_dev_add(dev, sf_dev, id);
@@ -296,7 +289,6 @@ static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
- unsigned int max_sfs;
int err;
if (!mlx5_sf_dev_supported(dev))
@@ -310,13 +302,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
table->dev = dev;
- if (MLX5_CAP_GEN(dev, max_num_sf))
- max_sfs = MLX5_CAP_GEN(dev, max_num_sf);
- else
- max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
table->base_address = pci_resource_start(dev->pdev, 2);
- table->max_sfs = max_sfs;
xa_init(&table->devices);
mutex_init(&table->table_lock);
dev->priv.sf_dev_table = table;
@@ -332,7 +319,6 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
err = mlx5_sf_dev_vhca_arm_all(table);
if (err)
goto arm_err;
- mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs);
return;
arm_err:
@@ -340,7 +326,6 @@ arm_err:
add_active_err:
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
vhca_err:
- table->max_sfs = 0;
kfree(table);
dev->priv.sf_dev_table = NULL;
table_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index 2a66a427ef15..b99131e95e37 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -19,6 +19,12 @@ struct mlx5_sf_dev {
u16 fn_id;
};
+struct mlx5_sf_peer_devlink_event_ctx {
+ u16 fn_id;
+ struct devlink *devlink;
+ int err;
+};
+
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 8fe82f1191bb..169c2c68ed5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -8,6 +8,20 @@
#include "dev.h"
#include "devlink.h"
+static int mlx5_core_peer_devlink_set(struct mlx5_sf_dev *sf_dev, struct devlink *devlink)
+{
+ struct mlx5_sf_peer_devlink_event_ctx event_ctx = {
+ .fn_id = sf_dev->fn_id,
+ .devlink = devlink,
+ };
+ int ret;
+
+ ret = mlx5_blocking_notifier_call_chain(sf_dev->parent_mdev,
+ MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
+ &event_ctx);
+ return ret == NOTIFY_OK ? event_ctx.err : 0;
+}
+
static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
@@ -54,9 +68,21 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
goto init_one_err;
}
+
+ err = mlx5_core_peer_devlink_set(sf_dev, devlink);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
+ goto peer_devlink_set_err;
+ }
+
devlink_register(devlink);
return 0;
+peer_devlink_set_err:
+ if (mlx5_dev_is_lightweight(sf_dev->mdev))
+ mlx5_uninit_one_light(sf_dev->mdev);
+ else
+ mlx5_uninit_one(sf_dev->mdev);
init_one_err:
iounmap(mdev->iseg);
remap_err:
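
Editor's note: mlx5_core_peer_devlink_set() above uses a common notifier idiom: the caller packs its arguments into an event-context struct, sends it down a blocking notifier chain, and reads the handler's result back out of the struct when NOTIFY_OK is returned. A minimal sketch with the notifier plumbing stubbed out; peer_event_ctx is a hypothetical stand-in shaped like mlx5_sf_peer_devlink_event_ctx.

```c
#include <stdio.h>

#define NOTIFY_DONE	0
#define NOTIFY_OK	1

struct peer_event_ctx {
	unsigned int fn_id;
	int err;		/* filled in by the handler */
};

/* Stand-in for the notifier callback registered on the chain. */
static int handler(unsigned long event, void *data)
{
	struct peer_event_ctx *ctx = data;

	(void)event;
	ctx->err = 0;		/* e.g. result of the devlink-set operation */
	return NOTIFY_OK;
}

int main(void)
{
	struct peer_event_ctx ctx = { .fn_id = 7 };
	int ret = handler(0, &ctx);

	/* NOTIFY_OK means a handler consumed the event: use its result. */
	printf("%d\n", ret == NOTIFY_OK ? ctx.err : 0);
	return 0;
}
```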
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index e34a8f88c518..6c11e075cab0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -20,43 +20,36 @@ struct mlx5_sf {
u16 hw_state;
};
+static struct mlx5_sf *mlx5_sf_by_dl_port(struct devlink_port *dl_port)
+{
+ struct mlx5_devlink_port *mlx5_dl_port = mlx5_devlink_port_get(dl_port);
+
+ return container_of(mlx5_dl_port, struct mlx5_sf, dl_port);
+}
+
struct mlx5_sf_table {
struct mlx5_core_dev *dev; /* To refer from notifier context. */
- struct xarray port_indices; /* port index based lookup. */
- refcount_t refcount;
- struct completion disable_complete;
+ struct xarray function_ids; /* function id based lookup. */
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
struct notifier_block esw_nb;
struct notifier_block vhca_nb;
+ struct notifier_block mdev_nb;
};
static struct mlx5_sf *
-mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
-{
- return xa_load(&table->port_indices, port_index);
-}
-
-static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
{
- unsigned long index;
- struct mlx5_sf *sf;
-
- xa_for_each(&table->port_indices, index, sf) {
- if (sf->hw_fn_id == fn_id)
- return sf;
- }
- return NULL;
+ return xa_load(&table->function_ids, fn_id);
}
-static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
- return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
+ return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL);
}
-static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
- xa_erase(&table->port_indices, sf->port_index);
+ xa_erase(&table->function_ids, sf->hw_fn_id);
}
static struct mlx5_sf *
@@ -93,7 +86,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
sf->controller = controller;
- err = mlx5_sf_id_insert(table, sf);
+ err = mlx5_sf_function_id_insert(table, sf);
if (err)
goto insert_err;
@@ -111,28 +104,11 @@ id_err:
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
- mlx5_sf_id_erase(table, sf);
mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
kfree(sf);
}
-static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
-{
- struct mlx5_sf_table *table = dev->priv.sf_table;
-
- if (!table)
- return NULL;
-
- return refcount_inc_not_zero(&table->refcount) ? table : NULL;
-}
-
-static void mlx5_sf_table_put(struct mlx5_sf_table *table)
-{
- if (refcount_dec_and_test(&table->refcount))
- complete(&table->disable_complete);
-}
-
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
switch (hw_state) {
@@ -172,26 +148,14 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
- struct mlx5_sf_table *table;
- struct mlx5_sf *sf;
- int err = 0;
-
- table = mlx5_sf_table_try_get(dev);
- if (!table)
- return -EOPNOTSUPP;
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
- sf = mlx5_sf_lookup_by_index(table, dl_port->index);
- if (!sf) {
- err = -EOPNOTSUPP;
- goto sf_err;
- }
mutex_lock(&table->sf_state_lock);
*state = mlx5_sf_to_devlink_state(sf->hw_state);
*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
mutex_unlock(&table->sf_state_lock);
-sf_err:
- mlx5_sf_table_put(table);
- return err;
+ return 0;
}
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
@@ -257,26 +221,10 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
- struct mlx5_sf_table *table;
- struct mlx5_sf *sf;
- int err;
-
- table = mlx5_sf_table_try_get(dev);
- if (!table) {
- NL_SET_ERR_MSG_MOD(extack,
- "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
- return -EOPNOTSUPP;
- }
- sf = mlx5_sf_lookup_by_index(table, dl_port->index);
- if (!sf) {
- err = -ENODEV;
- goto out;
- }
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
- err = mlx5_sf_state_set(dev, table, sf, state, extack);
-out:
- mlx5_sf_table_put(table);
- return err;
+ return mlx5_sf_state_set(dev, table, sf, state, extack);
}
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
@@ -335,32 +283,45 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
return 0;
}
+static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
+ mlx5_sf_hw_table_supported(dev);
+}
+
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *new_attr,
struct netlink_ext_ack *extack,
struct devlink_port **dl_port)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct mlx5_sf_table *table;
+ struct mlx5_sf_table *table = dev->priv.sf_table;
int err;
err = mlx5_sf_new_check_attr(dev, new_attr, extack);
if (err)
return err;
- table = mlx5_sf_table_try_get(dev);
- if (!table) {
+ if (!mlx5_sf_table_supported(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "SF ports are not supported.");
+ return -EOPNOTSUPP;
+ }
+
+ if (!is_mdev_switchdev_mode(dev)) {
NL_SET_ERR_MSG_MOD(extack,
- "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
+ "SF ports are only supported in eswitch switchdev mode.");
return -EOPNOTSUPP;
}
- err = mlx5_sf_add(dev, table, new_attr, extack, dl_port);
- mlx5_sf_table_put(table);
- return err;
+
+ return mlx5_sf_add(dev, table, new_attr, extack, dl_port);
}
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
+ mutex_lock(&table->sf_state_lock);
+
+ mlx5_sf_function_id_erase(table, sf);
+
if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
mlx5_sf_free(table, sf);
} else if (mlx5_sf_is_active(sf)) {
@@ -376,6 +337,16 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
kfree(sf);
}
+
+ mutex_unlock(&table->sf_state_lock);
+}
+
+static void mlx5_sf_del(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ struct mlx5_eswitch *esw = table->dev->priv.eswitch;
+
+ mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
+ mlx5_sf_dealloc(table, sf);
}
int mlx5_devlink_sf_port_del(struct devlink *devlink,
@@ -383,32 +354,11 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_sf_table *table;
- struct mlx5_sf *sf;
- int err = 0;
-
- table = mlx5_sf_table_try_get(dev);
- if (!table) {
- NL_SET_ERR_MSG_MOD(extack,
- "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
- return -EOPNOTSUPP;
- }
- sf = mlx5_sf_lookup_by_index(table, dl_port->index);
- if (!sf) {
- err = -ENODEV;
- goto sf_err;
- }
-
- mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
- mlx5_sf_id_erase(table, sf);
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
- mutex_lock(&table->sf_state_lock);
- mlx5_sf_dealloc(table, sf);
- mutex_unlock(&table->sf_state_lock);
-sf_err:
- mlx5_sf_table_put(table);
- return err;
+ mlx5_sf_del(table, sf);
+ return 0;
}
static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
@@ -433,14 +383,10 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
bool update = false;
struct mlx5_sf *sf;
- table = mlx5_sf_table_try_get(table->dev);
- if (!table)
- return 0;
-
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
if (!sf)
- goto sf_err;
+ goto unlock;
/* When driver is attached or detached to a function, an event
* notifies such state change.
@@ -450,46 +396,18 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
sf->hw_state = event->new_vhca_state;
trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
sf->hw_fn_id, sf->hw_state);
-sf_err:
+unlock:
mutex_unlock(&table->sf_state_lock);
- mlx5_sf_table_put(table);
return 0;
}
-static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
-{
- init_completion(&table->disable_complete);
- refcount_set(&table->refcount, 1);
-}
-
-static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
+static void mlx5_sf_del_all(struct mlx5_sf_table *table)
{
- struct mlx5_eswitch *esw = table->dev->priv.eswitch;
unsigned long index;
struct mlx5_sf *sf;
- /* At this point, no new user commands can start and no vhca event can
- * arrive. It is safe to destroy all user created SFs.
- */
- xa_for_each(&table->port_indices, index, sf) {
- mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
- mlx5_sf_id_erase(table, sf);
- mlx5_sf_dealloc(table, sf);
- }
-}
-
-static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
-{
- if (!refcount_read(&table->refcount))
- return;
-
- /* Balances with refcount_set; drop the reference so that new user cmd cannot start
- * and new vhca event handler cannot run.
- */
- mlx5_sf_table_put(table);
- wait_for_completion(&table->disable_complete);
-
- mlx5_sf_deactivate_all(table);
+ xa_for_each(&table->function_ids, index, sf)
+ mlx5_sf_del(table, sf);
}
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -498,11 +416,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
const struct mlx5_esw_event_info *mode = data;
switch (mode->new_mode) {
- case MLX5_ESWITCH_OFFLOADS:
- mlx5_sf_table_enable(table);
- break;
case MLX5_ESWITCH_LEGACY:
- mlx5_sf_table_disable(table);
+ mlx5_sf_del_all(table);
break;
default:
break;
@@ -511,10 +426,29 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
return 0;
}
-static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
+static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
- return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
- mlx5_sf_hw_table_supported(dev);
+ struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb);
+ struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
+ int ret = NOTIFY_DONE;
+ struct mlx5_sf *sf;
+
+ if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
+ return NOTIFY_DONE;
+
+ mutex_lock(&table->sf_state_lock);
+ sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
+ if (!sf)
+ goto out;
+
+ event_ctx->err = devl_port_fn_devlink_set(&sf->dl_port.dl_port,
+ event_ctx->devlink);
+
+ ret = NOTIFY_OK;
+out:
+ mutex_unlock(&table->sf_state_lock);
+ return ret;
}
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
@@ -531,9 +465,8 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
mutex_init(&table->sf_state_lock);
table->dev = dev;
- xa_init(&table->port_indices);
+ xa_init(&table->function_ids);
dev->priv.sf_table = table;
- refcount_set(&table->refcount, 0);
table->esw_nb.notifier_call = mlx5_sf_esw_event;
err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
if (err)
@@ -544,6 +477,9 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
if (err)
goto vhca_err;
+ table->mdev_nb.notifier_call = mlx5_sf_mdev_event;
+ mlx5_blocking_notifier_register(dev, &table->mdev_nb);
+
return 0;
vhca_err:
@@ -562,10 +498,10 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
if (!table)
return;
+ mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
- WARN_ON(refcount_read(&table->refcount));
mutex_destroy(&table->sf_state_lock);
- WARN_ON(!xa_empty(&table->port_indices));
+ WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
}
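
Editor's note: the rework above re-keys the SF xarray by hardware function id, so the vhca-event and peer-devlink handlers resolve an SF with a single xa_load() instead of a linear xa_for_each() scan, while devlink-port callbacks reach the SF via container_of() on the port. A toy illustration of the keyed-lookup half, with a plain array standing in for the xarray:

```c
#include <stdio.h>

#define MAX_FN 256

struct sf { unsigned int hw_fn_id; };

/* A plain array stands in for struct xarray keyed by function id. */
static struct sf *function_ids[MAX_FN];

static int sf_insert(struct sf *sf)
{
	if (function_ids[sf->hw_fn_id])
		return -1;	/* xa_insert() would return -EBUSY */
	function_ids[sf->hw_fn_id] = sf;
	return 0;
}

static struct sf *sf_lookup(unsigned int fn_id)
{
	return function_ids[fn_id];	/* O(1), like xa_load() */
}

int main(void)
{
	struct sf a = { .hw_fn_id = 42 };

	sf_insert(&a);
	printf("found fn_id %u\n", sf_lookup(42)->hw_fn_id);
	return 0;
}
```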
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 5b83da08692d..6ea88a581804 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -55,6 +55,12 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
return action_type_to_str[action_id];
}
+static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
+ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
+}
+
static const enum dr_action_valid_state
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = {
[DR_ACTION_DOMAIN_NIC_INGRESS] = {
@@ -1163,12 +1169,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
bool ignore_flow_level,
u32 flow_source)
{
+ struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest;
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
struct mlx5dr_action *action;
bool reformat_req = false;
+ bool is_ft_wire = false;
+ u16 num_dst_ft = 0;
u32 num_of_ref = 0;
u32 ref_act_cnt;
+ u16 last_dest;
int ret;
int i;
@@ -1210,11 +1220,22 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
break;
case DR_ACTION_TYP_FT:
+ if (num_dst_ft &&
+ !mlx5dr_action_supp_fwd_fdb_multi_ft(dmn->mdev)) {
+ mlx5dr_dbg(dmn, "multiple FT destinations not supported\n");
+ goto free_ref_actions;
+ }
+ num_dst_ft++;
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- if (dest_action->dest_tbl->is_fw_tbl)
+ if (dest_action->dest_tbl->is_fw_tbl) {
hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id;
- else
+ } else {
hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id;
+ if (dest_action->dest_tbl->is_wire_ft) {
+ is_ft_wire = true;
+ last_dest = i;
+ }
+ }
break;
default:
@@ -1223,6 +1244,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
}
}
+	/* In a multi-destination action, FW iterates over the destinations
+	 * on RX, except for the last one, which is handled on TX.
+	 * So if one of the FT targets is the wire, move it to the end of
+	 * the destination list.
+	 */
+ if (is_ft_wire && num_dst_ft > 1) {
+ tmp_hw_dest = hw_dests[last_dest];
+ hw_dests[last_dest] = hw_dests[num_of_dests - 1];
+ hw_dests[num_of_dests - 1] = tmp_hw_dest;
+ }
+
action = dr_action_create_generic(DR_ACTION_TYP_FT);
if (!action)
goto free_ref_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 6c59de3e28f6..55dc7383477c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1064,6 +1064,7 @@ struct mlx5dr_action_sampler {
struct mlx5dr_action_dest_tbl {
u8 is_fw_tbl:1;
+ u8 is_wire_ft:1;
union {
struct mlx5dr_table *tbl;
struct {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 14f6df88b1f9..50c2554c9ccf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -209,10 +209,17 @@ static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
+ struct mlx5dr_action *tbl_action;
if (mlx5dr_is_fw_table(dest_ft))
return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
- return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
+
+ tbl_action = mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
+ if (tbl_action)
+ tbl_action->dest_tbl->is_wire_ft =
+ dest_ft->flags & MLX5_FLOW_TABLE_UPLINK_VPORT ? 1 : 0;
+
+ return tbl_action;
}
static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 694de9513b9f..954ba0826c61 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -471,7 +471,7 @@ out:
return err;
}
-static int mlxbf_gige_remove(struct platform_device *pdev)
+static void mlxbf_gige_remove(struct platform_device *pdev)
{
struct mlxbf_gige *priv = platform_get_drvdata(pdev);
@@ -479,8 +479,6 @@ static int mlxbf_gige_remove(struct platform_device *pdev)
phy_disconnect(priv->netdev->phydev);
mlxbf_gige_mdio_remove(priv);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static void mlxbf_gige_shutdown(struct platform_device *pdev)
@@ -499,7 +497,7 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);
static struct platform_driver mlxbf_gige_driver = {
.probe = mlxbf_gige_probe,
- .remove = mlxbf_gige_remove,
+ .remove_new = mlxbf_gige_remove,
.shutdown = mlxbf_gige_shutdown,
.driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index e5474d3e34db..c6bc5819ce43 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -624,7 +624,7 @@ struct mlxsw_linecards {
struct mlxsw_linecard_types_info *types_info;
struct list_head event_ops_list;
struct mutex event_ops_list_lock; /* Locks accesses to event ops list */
- struct mlxsw_linecard linecards[];
+ struct mlxsw_linecard linecards[] __counted_by(count);
};
static inline struct mlxsw_linecard *
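
Editor's note: __counted_by(count), added above, tells the compiler which struct member holds the flexible array's element count so FORTIFY_SOURCE/UBSAN can bounds-check accesses. A minimal userspace example with a no-op fallback for compilers lacking the attribute; in the kernel the macro comes from the compiler-attribute headers.

```c
#include <stdio.h>
#include <stdlib.h>

#ifndef __counted_by
#define __counted_by(member)	/* no-op fallback for older compilers */
#endif

struct linecards {
	unsigned int count;
	int linecards[] __counted_by(count);
};

int main(void)
{
	unsigned int n = 4;
	struct linecards *lc = malloc(sizeof(*lc) + n * sizeof(int));

	if (!lc)
		return 1;
	lc->count = n;		/* must be set before the array is accessed */
	lc->linecards[3] = 7;	/* in bounds; index 4 would be flagged */
	printf("%d\n", lc->linecards[3]);
	free(lc);
	return 0;
}
```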
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 70f9b5e85a26..745438d8ae10 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -32,8 +32,7 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER, 0x18, 17, 12),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
@@ -44,6 +43,9 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
MLXSW_AFK_ELEMENT_INFO_U32(FDB_MISS, 0x40, 0, 1),
MLXSW_AFK_ELEMENT_INFO_U32(L4_PORT_RANGE, 0x40, 1, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_3, 0x40, 17, 4),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_4_7, 0x40, 21, 4),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x40, 25, 4),
};
struct mlxsw_afk {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 2eac7582c31a..1c76aa3ffab7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -33,10 +33,12 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_IP_TTL_,
MLXSW_AFK_ELEMENT_IP_ECN,
MLXSW_AFK_ELEMENT_IP_DSCP,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER,
MLXSW_AFK_ELEMENT_FDB_MISS,
MLXSW_AFK_ELEMENT_L4_PORT_RANGE,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
MLXSW_AFK_ELEMENT_MAX,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index d637c0348fa1..53b150b7ae4e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -34,7 +34,7 @@ struct mlxsw_env {
u8 num_of_slots; /* Including the main board. */
u8 max_eeprom_len; /* Maximum module EEPROM transaction length. */
struct mutex line_cards_lock; /* Protects line cards. */
- struct mlxsw_env_line_card *line_cards[];
+ struct mlxsw_env_line_card *line_cards[] __counted_by(num_of_slots);
};
static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
@@ -775,7 +775,7 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
int err;
mlxsw_reg_mtbr_pack(mtbr_pl, slot_index,
- MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 0fd290d776ff..9c12e1feb643 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -293,7 +293,7 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index,
- MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
if (err) {
dev_err(dev, "Failed to query module temperature sensor\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
index af37e650a8ad..e8d6fe35bf36 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
@@ -132,6 +132,7 @@ static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev,
struct mlxsw_linecard *linecard = linecard_bdev->linecard;
struct mlxsw_linecard_dev *linecard_dev;
struct devlink *devlink;
+ int err;
devlink = devlink_alloc(&mlxsw_linecard_dev_devlink_ops,
sizeof(*linecard_dev), &adev->dev);
@@ -141,8 +142,12 @@ static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev,
linecard_dev->linecard = linecard_bdev->linecard;
linecard_bdev->linecard_dev = linecard_dev;
+ err = devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink);
+ if (err) {
+ devlink_free(devlink);
+ return err;
+ }
devlink_register(devlink);
- devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink);
return 0;
}
@@ -151,9 +156,7 @@ static void mlxsw_linecard_bdev_remove(struct auxiliary_device *adev)
struct mlxsw_linecard_bdev *linecard_bdev =
container_of(adev, struct mlxsw_linecard_bdev, adev);
struct devlink *devlink = priv_to_devlink(linecard_bdev->linecard_dev);
- struct mlxsw_linecard *linecard = linecard_bdev->linecard;
- devlink_linecard_nested_dl_set(linecard->devlink_linecard, NULL);
devlink_unregister(devlink);
devlink_free(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 70d7fff24fa2..f709e44c76a8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -31,6 +31,7 @@
/* External cooling devices, allowed for binding to mlxsw thermal zones. */
static char * const mlxsw_thermal_external_allowed_cdev[] = {
"mlxreg_fan",
+ "emc2305",
};
struct mlxsw_cooling_states {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index d23f293e285c..1e150ce1c73a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -424,9 +424,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
if (in_mbox) {
reg_size = mlxsw_i2c_get_reg_size(in_mbox);
- num = reg_size / mlxsw_i2c->block_size;
- if (reg_size % mlxsw_i2c->block_size)
- num++;
+ num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size);
if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
dev_err(&client->dev, "Could not acquire lock");
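
Editor's note: the i2c.c hunk above replaces an open-coded divide-and-round with the kernel's DIV_ROUND_UP() helper. Its definition is a one-liner; here it is reimplemented locally for a small demonstration.

```c
#include <assert.h>
#include <stdio.h>

/* Local copy of the kernel's DIV_ROUND_UP() one-liner. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* A 100-byte register transferred in 32-byte I2C blocks. */
	unsigned int num = DIV_ROUND_UP(100u, 32u);

	assert(num == 4);	/* three full blocks plus one partial block */
	printf("%u transactions\n", num);
	return 0;
}
```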
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index ae556ddd7624..9970921ceef3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -9551,7 +9551,7 @@ MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1);
#define MLXSW_REG_MTBR_ID 0x900F
#define MLXSW_REG_MTBR_BASE_LEN 0x10 /* base length, without records */
#define MLXSW_REG_MTBR_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_MTBR_REC_MAX_COUNT 47 /* firmware limitation */
+#define MLXSW_REG_MTBR_REC_MAX_COUNT 1
#define MLXSW_REG_MTBR_LEN (MLXSW_REG_MTBR_BASE_LEN + \
MLXSW_REG_MTBR_REC_LEN * \
MLXSW_REG_MTBR_REC_MAX_COUNT)
@@ -9597,12 +9597,12 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16,
MLXSW_REG_MTBR_REC_LEN, 0x00, false);
static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index,
- u16 base_sensor_index, u8 num_rec)
+ u16 base_sensor_index)
{
MLXSW_REG_ZERO(mtbr, payload);
mlxsw_reg_mtbr_slot_index_set(payload, slot_index);
mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index);
- mlxsw_reg_mtbr_num_rec_set(payload, num_rec);
+ mlxsw_reg_mtbr_num_rec_set(payload, 1);
}
 /* Error codes from temperature reading */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
index b1178b7a7f51..99eeafdc8d1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -45,8 +45,7 @@ static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp,
}
static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = {
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER,
MLXSW_AFK_ELEMENT_SRC_IP_0_31,
MLXSW_AFK_ELEMENT_DST_IP_0_31,
};
@@ -89,8 +88,9 @@ static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam)
}
static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = {
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7,
MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
MLXSW_AFK_ELEMENT_SRC_IP_96_127,
MLXSW_AFK_ELEMENT_SRC_IP_64_95,
MLXSW_AFK_ELEMENT_SRC_IP_32_63,
@@ -142,6 +142,8 @@ static void
mlxsw_sp2_mr_tcam_rule_parse4(struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_mr_route_key *key)
{
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER,
+ key->vrid, GENMASK(11, 0));
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
(char *) &key->source.addr4,
(char *) &key->source_mask.addr4, 4);
@@ -154,6 +156,13 @@ static void
mlxsw_sp2_mr_tcam_rule_parse6(struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_mr_route_key *key)
{
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3,
+ key->vrid, GENMASK(3, 0));
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7,
+ key->vrid >> 4, GENMASK(3, 0));
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ key->vrid >> 8, GENMASK(3, 0));
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
&key->source.addr6.s6_addr[0x0],
&key->source_mask.addr6.s6_addr[0x0], 4);
@@ -189,11 +198,6 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule,
rulei = mlxsw_sp_acl_rule_rulei(rule);
rulei->priority = priority;
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
- key->vrid, GENMASK(7, 0));
- mlxsw_sp_acl_rulei_keymask_u32(rulei,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
- key->vrid >> 8, GENMASK(3, 0));
switch (key->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key);
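
Editor's note: on Spectrum-2, the IPv6 multicast-route key above spreads the 12-bit virtual-router id across three 4-bit key elements (bits 0-3, 4-7 and 8-11), each written with a GENMASK(3, 0) mask. A quick sketch of the split; the VRID value is hypothetical.

```c
#include <stdint.h>
#include <stdio.h>

#define NIBBLE_MASK 0xFu	/* GENMASK(3, 0) */

int main(void)
{
	uint16_t vrid = 0xABC;	/* hypothetical 12-bit VRID */

	printf("VIRT_ROUTER_0_3 = 0x%x\n", vrid & NIBBLE_MASK);        /* 0xc */
	printf("VIRT_ROUTER_4_7 = 0x%x\n", (vrid >> 4) & NIBBLE_MASK); /* 0xb */
	printf("VIRT_ROUTER_MSB = 0x%x\n", (vrid >> 8) & NIBBLE_MASK); /* 0xa */
	return 0;
}
```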
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index e2aced7ab454..95f63fcf4ba1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -496,7 +496,7 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
* is 2^ACL_MAX_BF_LOG
*/
bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
- bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks),
+ bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)),
GFP_KERNEL);
if (!bf)
return ERR_PTR(-ENOMEM);
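The point of the size_mul() conversion above is overflow safety: size_mul() saturates at SIZE_MAX instead of wrapping, so an oversized bank count makes struct_size() saturate as well and the kzalloc() fail cleanly rather than under-allocate. An illustrative sketch only, with a hypothetical helper name:

	#include <linux/overflow.h>

	/* size_mul() returns SIZE_MAX when the product would overflow
	 * size_t, so the subsequent allocation fails instead of silently
	 * wrapping to a small size.
	 */
	static size_t bf_refcnt_count_example(size_t bank_size, unsigned int banks)
	{
		return size_mul(bank_size, banks);	/* saturates, never wraps */
	}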
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index cb746a43b24b..4b3564f5fd65 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -171,20 +171,22 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = {
- MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8),
- MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = {
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER, 0x04, 20, 11, 0, true),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_3, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_4_7, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
@@ -220,7 +222,7 @@ static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
- MLXSW_AFK_BLOCK(0x3C, mlxsw_sp_afk_element_info_ipv4_4),
+ MLXSW_AFK_BLOCK(0x3D, mlxsw_sp_afk_element_info_ipv4_5),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2),
@@ -322,12 +324,12 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = {
- MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8),
- MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4),
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
@@ -341,7 +343,7 @@ static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
- MLXSW_AFK_BLOCK(0x35, mlxsw_sp_afk_element_info_ipv4_4b),
+ MLXSW_AFK_BLOCK(0x36, mlxsw_sp_afk_element_info_ipv4_5b),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
MLXSW_AFK_BLOCK(0x47, mlxsw_sp_afk_element_info_ipv6_2b),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index ee59c79156e4..50e591420bd9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -24,7 +24,7 @@ struct mlxsw_sp_counter_pool {
spinlock_t counter_pool_lock; /* Protects counter pool allocations */
atomic_t active_entries_count;
unsigned int sub_pools_count;
- struct mlxsw_sp_counter_sub_pool sub_pools[];
+ struct mlxsw_sp_counter_sub_pool sub_pools[] __counted_by(sub_pools_count);
};
static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
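__counted_by() ties a flexible array to the struct member that holds its element count, which lets CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS bounds-check array accesses at run time. The annotation pattern, sketched with hypothetical names:

	struct example_item {
		int val;
	};

	struct example_pool {
		unsigned int count;
		/* 'count' must be assigned before 'entries' is indexed so
		 * the run-time bounds checker sees the right length.
		 */
		struct example_item entries[] __counted_by(count);
	};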
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index debd2c466f11..82a95125d9ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3107,7 +3107,7 @@ struct mlxsw_sp_nexthop_group_info {
gateway:1, /* routes using the group use a gateway */
is_resilient:1;
struct list_head list; /* member in nh_res_grp_list */
- struct mlxsw_sp_nexthop nexthops[];
+ struct mlxsw_sp_nexthop nexthops[] __counted_by(count);
};
static struct mlxsw_sp_rif *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index b3472fb94617..af50ff9e5f26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -31,7 +31,7 @@ struct mlxsw_sp_span {
refcount_t policer_id_base_ref_count;
atomic_t active_entries_count;
int entries_count;
- struct mlxsw_sp_span_entry entries[];
+ struct mlxsw_sp_span_entry entries[] __counted_by(entries_count);
};
struct mlxsw_sp_span_analyzed_port {
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index c11b118dc415..ddd87ef71caf 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1228,7 +1228,7 @@ err_mem_region:
return err;
}
-static int ks8842_remove(struct platform_device *pdev)
+static void ks8842_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -1239,7 +1239,6 @@ static int ks8842_remove(struct platform_device *pdev)
iounmap(adapter->hw_addr);
free_netdev(netdev);
release_mem_region(iomem->start, resource_size(iomem));
- return 0;
}
@@ -1248,7 +1247,7 @@ static struct platform_driver ks8842_platform_driver = {
.name = DRV_NAME,
},
.probe = ks8842_probe,
- .remove = ks8842_remove,
+ .remove_new = ks8842_remove,
};
module_platform_driver(ks8842_platform_driver);
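This is the first of many conversions in this series to .remove_new, the void-returning variant of the platform driver .remove callback: device removal cannot fail, so the int that drivers used to return was only ever logged and then ignored by the driver core. All the conversions follow the same shape, sketched here with hypothetical names:

	static int example_probe(struct platform_device *pdev);	/* hypothetical */

	static void example_remove(struct platform_device *pdev)
	{
		/* Tear down and free resources; there is nothing useful
		 * to return, since removal cannot be aborted anyway.
		 */
	}

	static struct platform_driver example_driver = {
		.probe      = example_probe,
		.remove_new = example_remove,	/* void-returning remove */
		.driver = {
			.name = "example",
		},
	};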
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 7f49042484bd..2a7f29854267 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -327,11 +327,9 @@ static int ks8851_probe_par(struct platform_device *pdev)
return ks8851_probe_common(netdev, dev, msg_enable);
}
-static int ks8851_remove_par(struct platform_device *pdev)
+static void ks8851_remove_par(struct platform_device *pdev)
{
ks8851_remove_common(&pdev->dev);
-
- return 0;
}
static const struct of_device_id ks8851_match_table[] = {
@@ -347,7 +345,7 @@ static struct platform_driver ks8851_driver = {
.pm = &ks8851_pm_ops,
},
.probe = ks8851_probe_par,
- .remove = ks8851_remove_par,
+ .remove_new = ks8851_remove_par,
};
module_platform_driver(ks8851_driver);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index c81cdeb4d4e7..f940895b14e8 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1466,9 +1466,15 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
static void lan743x_phy_close(struct lan743x_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct phy_device *phydev = netdev->phydev;
phy_stop(netdev->phydev);
phy_disconnect(netdev->phydev);
+
+ /* using phydev here as phy_disconnect NULLs netdev->phydev */
+ if (phy_is_pseudo_fixed_link(phydev))
+ fixed_phy_unregister(phydev);
}
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 0d6e79af2410..8e4101628fbd 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -1261,7 +1261,7 @@ cleanup_ports:
return err;
}
-static int lan966x_remove(struct platform_device *pdev)
+static void lan966x_remove(struct platform_device *pdev)
{
struct lan966x *lan966x = platform_get_drvdata(pdev);
@@ -1280,13 +1280,11 @@ static int lan966x_remove(struct platform_device *pdev)
lan966x_ptp_deinit(lan966x);
debugfs_remove_recursive(lan966x->debugfs_root);
-
- return 0;
}
static struct platform_driver lan966x_driver = {
.probe = lan966x_probe,
- .remove = lan966x_remove,
+ .remove_new = lan966x_remove,
.driver = {
.name = "lan966x-switch",
.of_match_table = lan966x_match,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index dc9af480bfea..d1f7fc8b1b71 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -911,7 +911,7 @@ cleanup_pnode:
return err;
}
-static int mchp_sparx5_remove(struct platform_device *pdev)
+static void mchp_sparx5_remove(struct platform_device *pdev)
{
struct sparx5 *sparx5 = platform_get_drvdata(pdev);
@@ -931,8 +931,6 @@ static int mchp_sparx5_remove(struct platform_device *pdev)
/* Unregister netdevs */
sparx5_unregister_notifier_blocks(sparx5);
destroy_workqueue(sparx5->mact_queue);
-
- return 0;
}
static const struct of_device_id mchp_sparx5_match[] = {
@@ -943,7 +941,7 @@ MODULE_DEVICE_TABLE(of, mchp_sparx5_match);
static struct platform_driver mchp_sparx5_driver = {
.probe = mchp_sparx5_probe,
- .remove = mchp_sparx5_remove,
+ .remove_new = mchp_sparx5_remove,
.driver = {
.name = "sparx5-switch",
.of_match_table = mchp_sparx5_match,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
index c2c3397c5898..59bfbda29bb3 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
@@ -300,7 +300,7 @@ static int vcap_show_admin(struct vcap_control *vctrl,
vcap_show_admin_info(vctrl, admin, out);
list_for_each_entry(elem, &admin->rules, list) {
vrule = vcap_decode_rule(elem);
- if (IS_ERR_OR_NULL(vrule)) {
+ if (IS_ERR(vrule)) {
ret = PTR_ERR(vrule);
break;
}
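vcap_decode_rule() returns either a valid rule or an ERR_PTR(), never NULL, so the IS_ERR_OR_NULL() check was subtly wrong: had it ever matched a NULL, ret would have become PTR_ERR(NULL) == 0 and the failure would have been reported as success. A minimal sketch of the corrected check, assuming the same calling convention:

	static int decode_check_example(struct vcap_rule *vrule)
	{
		/* Only ERR_PTR() encodes an errno; for a NULL pointer,
		 * PTR_ERR() would yield 0 and mask the error.
		 */
		if (IS_ERR(vrule))
			return PTR_ERR(vrule);
		return 0;
	}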
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 3da99b62797d..96dc69e7141f 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -558,7 +558,7 @@ irq_map_fail:
return ret;
}
-static int moxart_remove(struct platform_device *pdev)
+static void moxart_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -566,8 +566,6 @@ static int moxart_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, ndev->irq, ndev);
moxart_mac_free_memory(ndev);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id moxart_mac_match[] = {
@@ -578,7 +576,7 @@ MODULE_DEVICE_TABLE(of, moxart_mac_match);
static struct platform_driver moxart_mac_driver = {
.probe = moxart_mac_probe,
- .remove = moxart_remove,
+ .remove_new = moxart_remove,
.driver = {
.name = "moxart-ethernet",
.of_match_table = moxart_mac_match,
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 151b42465348..993212c3a7da 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -392,7 +392,7 @@ out_free_devlink:
return err;
}
-static int mscc_ocelot_remove(struct platform_device *pdev)
+static void mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
@@ -408,13 +408,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
unregister_switchdev_notifier(&ocelot_switchdev_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
devlink_free(ocelot->devlink);
-
- return 0;
}
static struct platform_driver mscc_ocelot_driver = {
.probe = mscc_ocelot_probe,
- .remove = mscc_ocelot_remove,
+ .remove_new = mscc_ocelot_remove,
.driver = {
.name = "ocelot-switch",
.of_match_table = mscc_ocelot_match,
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 3f371faeb6d0..2b6e097df28f 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -227,7 +227,7 @@ MODULE_ALIAS("platform:jazzsonic");
#include "sonic.c"
-static int jazz_sonic_device_remove(struct platform_device *pdev)
+static void jazz_sonic_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sonic_local* lp = netdev_priv(dev);
@@ -237,13 +237,11 @@ static int jazz_sonic_device_remove(struct platform_device *pdev)
lp->descriptors, lp->descriptors_laddr);
release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver jazz_sonic_driver = {
.probe = jazz_sonic_probe,
- .remove = jazz_sonic_device_remove,
+ .remove_new = jazz_sonic_device_remove,
.driver = {
.name = jazz_sonic_string,
},
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index b16f7c830f9b..2fc63860dbdb 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -532,7 +532,7 @@ MODULE_ALIAS("platform:macsonic");
#include "sonic.c"
-static int mac_sonic_platform_remove(struct platform_device *pdev)
+static void mac_sonic_platform_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sonic_local* lp = netdev_priv(dev);
@@ -541,13 +541,11 @@ static int mac_sonic_platform_remove(struct platform_device *pdev)
dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
lp->descriptors, lp->descriptors_laddr);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver mac_sonic_platform_driver = {
.probe = mac_sonic_platform_probe,
- .remove = mac_sonic_platform_remove,
+ .remove_new = mac_sonic_platform_remove,
.driver = {
.name = "macsonic",
},
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 52fef34d43f9..8943e7244310 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -249,7 +249,7 @@ MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver");
#include "sonic.c"
-static int xtsonic_device_remove(struct platform_device *pdev)
+static void xtsonic_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sonic_local *lp = netdev_priv(dev);
@@ -260,13 +260,11 @@ static int xtsonic_device_remove(struct platform_device *pdev)
lp->descriptors, lp->descriptors_laddr);
release_region (dev->base_addr, SONIC_MEM_SIZE);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver xtsonic_driver = {
.probe = xtsonic_probe,
- .remove = xtsonic_device_remove,
+ .remove_new = xtsonic_device_remove,
.driver = {
.name = xtsonic_string,
},
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index 5d9db8c2a5b4..45be6954d5aa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -256,7 +256,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
if (xdp_redir)
- xdp_do_flush_map();
+ xdp_do_flush();
if (tx_ring->wr_ptr_add)
nfp_net_tx_xmit_more_flush(tx_ring);
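This and the later hunks of the same kind are a mechanical rename: xdp_do_flush_map() became xdp_do_flush(), reflecting that the flush covers every XDP_REDIRECT target type (devmaps, cpumaps and xskmaps), not just maps. The usual call site, sketched with hypothetical names:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work = example_clean_rx(napi, budget);	/* hypothetical */

		/* One call per poll flushes all pending XDP_REDIRECT
		 * bulk queues, whatever their target type.
		 */
		xdp_do_flush();

		return work;
	}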
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index ba27bbc68f85..97f4798f4b42 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1397,7 +1397,7 @@ free_netdev:
return err;
}
-static int nixge_remove(struct platform_device *pdev)
+static void nixge_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct nixge_priv *priv = netdev_priv(ndev);
@@ -1412,13 +1412,11 @@ static int nixge_remove(struct platform_device *pdev)
mdiobus_unregister(priv->mii_bus);
free_netdev(ndev);
-
- return 0;
}
static struct platform_driver nixge_driver = {
.probe = nixge_probe,
- .remove = nixge_remove,
+ .remove_new = nixge_remove,
.driver = {
.name = "nixge",
.of_match_table = nixge_dt_ids,
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 1a4a272f4c5c..dd3e58a1319c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1417,7 +1417,7 @@ err_exit:
return ret;
}
-static int lpc_eth_drv_remove(struct platform_device *pdev)
+static void lpc_eth_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct netdata_local *pldat = netdev_priv(ndev);
@@ -1436,8 +1436,6 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
clk_disable_unprepare(pldat->clk);
clk_put(pldat->clk);
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1505,7 +1503,7 @@ MODULE_DEVICE_TABLE(of, lpc_eth_match);
static struct platform_driver lpc_eth_driver = {
.probe = lpc_eth_drv_probe,
- .remove = lpc_eth_drv_remove,
+ .remove_new = lpc_eth_drv_remove,
#ifdef CONFIG_PM
.suspend = lpc_eth_drv_suspend,
.resume = lpc_eth_drv_resume,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index aae4131f146a..1dbc3cb50b1d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/skbuff.h>
#include "ionic_if.h"
#include "ionic_regs.h"
@@ -217,7 +218,7 @@ struct ionic_desc_info {
};
unsigned int bytes;
unsigned int nbufs;
- struct ionic_buf_info bufs[IONIC_MAX_FRAGS];
+ struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1];
ionic_desc_cb cb;
void *cb_arg;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 2c3e36b2dd7f..edc14730ce88 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3831,6 +3831,18 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
qtype, qti->max_sg_elems);
dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
qtype, qti->sg_desc_stride);
+
+ if (qti->max_sg_elems >= IONIC_MAX_FRAGS) {
+ qti->max_sg_elems = IONIC_MAX_FRAGS - 1;
+ dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n",
+ qtype, qti->max_sg_elems);
+ }
+
+ if (qti->max_sg_elems > MAX_SKB_FRAGS) {
+ qti->max_sg_elems = MAX_SKB_FRAGS;
+ dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n",
+ qtype, qti->max_sg_elems);
+ }
}
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 44466e8c5d77..ccc1b1d407e4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -1243,25 +1243,84 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ bool too_many_frags = false;
+ skb_frag_t *frag;
+ int desc_bufs;
+ int chunk_len;
+ int frag_rem;
+ int tso_rem;
+ int seg_rem;
+ bool encap;
+ int hdrlen;
int ndescs;
int err;
/* Each desc is mss long max, so a descriptor for each gso_seg */
- if (skb_is_gso(skb))
+ if (skb_is_gso(skb)) {
ndescs = skb_shinfo(skb)->gso_segs;
- else
+ } else {
ndescs = 1;
+ if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) {
+ too_many_frags = true;
+ goto linearize;
+ }
+ }
- /* If non-TSO, just need 1 desc and nr_frags sg elems */
- if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
+ /* If non-TSO, or no frags to check, we're done */
+ if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags)
return ndescs;
- /* Too many frags, so linearize */
- err = skb_linearize(skb);
- if (err)
- return err;
+	/* We need to scan the skb to be sure that none of the MTU-sized
+	 * packets in the TSO will require more SGs per descriptor than we
+	 * can support. We loop through the frags, add up the lengths for
+	 * a packet, and count the number of SGs used per packet.
+ */
+ tso_rem = skb->len;
+ frag = skb_shinfo(skb)->frags;
+ encap = skb->encapsulation;
+
+ /* start with just hdr in first part of first descriptor */
+ if (encap)
+ hdrlen = skb_inner_tcp_all_headers(skb);
+ else
+ hdrlen = skb_tcp_all_headers(skb);
+ seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size);
+ frag_rem = hdrlen;
+
+ while (tso_rem > 0) {
+ desc_bufs = 0;
+ while (seg_rem > 0) {
+ desc_bufs++;
+
+ /* We add the +1 because we can take buffers for one
+ * more than we have SGs: one for the initial desc data
+ * in addition to the SG segments that might follow.
+ */
+ if (desc_bufs > q->max_sg_elems + 1) {
+ too_many_frags = true;
+ goto linearize;
+ }
+
+ if (frag_rem == 0) {
+ frag_rem = skb_frag_size(frag);
+ frag++;
+ }
+ chunk_len = min(frag_rem, seg_rem);
+ frag_rem -= chunk_len;
+ tso_rem -= chunk_len;
+ seg_rem -= chunk_len;
+ }
+
+ seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size);
+ }
- stats->linearize++;
+linearize:
+ if (too_many_frags) {
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+ stats->linearize++;
+ }
return ndescs;
}
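A worked example of the scan above, with illustrative numbers only:

	/* gso_size = 1400, hdrlen = 54, max_sg_elems = 2
	 * first segment budget: seg_rem = 54 + 1400 = 1454 bytes
	 *   header chunk (54 bytes)  -> desc_bufs = 1
	 *   frag of 100 bytes        -> desc_bufs = 2
	 *   frag of 400 bytes        -> desc_bufs = 3
	 *   next buffer needed       -> desc_bufs = 4 > max_sg_elems + 1 = 3,
	 * so too_many_frags is set and the skb is linearized before mapping.
	 */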
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 19bb16daf4e7..3270df72541b 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -718,7 +718,7 @@ err_undo_netdev:
return ret;
}
-static int emac_remove(struct platform_device *pdev)
+static void emac_remove(struct platform_device *pdev)
{
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
struct emac_adapter *adpt = netdev_priv(netdev);
@@ -742,8 +742,6 @@ static int emac_remove(struct platform_device *pdev)
iounmap(adpt->phy.base);
free_netdev(netdev);
-
- return 0;
}
static void emac_shutdown(struct platform_device *pdev)
@@ -762,7 +760,7 @@ static void emac_shutdown(struct platform_device *pdev)
static struct platform_driver emac_platform_driver = {
.probe = emac_probe,
- .remove = emac_remove,
+ .remove_new = emac_remove,
.driver = {
.name = "qcom-emac",
.of_match_table = emac_dt_match,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 7df9f9f8e134..e62391180032 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2878,7 +2878,7 @@ out_release:
return error;
}
-static int ravb_remove(struct platform_device *pdev)
+static void ravb_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
@@ -2905,8 +2905,6 @@ static int ravb_remove(struct platform_device *pdev)
reset_control_assert(priv->rstc);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static int ravb_wol_setup(struct net_device *ndev)
@@ -3044,7 +3042,7 @@ static const struct dev_pm_ops ravb_dev_pm_ops = {
static struct platform_driver ravb_driver = {
.probe = ravb_probe,
- .remove = ravb_remove,
+ .remove_new = ravb_remove,
.driver = {
.name = "ravb",
.pm = &ravb_dev_pm_ops,
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index fc01ad3f340d..0023ff254915 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1979,7 +1979,7 @@ static void rswitch_deinit(struct rswitch_private *priv)
rswitch_clock_disable(priv);
}
-static int renesas_eth_sw_remove(struct platform_device *pdev)
+static void renesas_eth_sw_remove(struct platform_device *pdev)
{
struct rswitch_private *priv = platform_get_drvdata(pdev);
@@ -1989,13 +1989,11 @@ static int renesas_eth_sw_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_driver renesas_eth_sw_driver_platform = {
.probe = renesas_eth_sw_probe,
- .remove = renesas_eth_sw_remove,
+ .remove_new = renesas_eth_sw_remove,
.driver = {
.name = "renesas_eth_sw",
.of_match_table = renesas_eth_sw_of_table,
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 274ea16c0a1f..475e1e8c1d35 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3431,7 +3431,7 @@ out_release:
return ret;
}
-static int sh_eth_drv_remove(struct platform_device *pdev)
+static void sh_eth_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -3441,8 +3441,6 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
sh_mdio_release(mdp);
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -3562,7 +3560,7 @@ MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = {
.probe = sh_eth_drv_probe,
- .remove = sh_eth_drv_remove,
+ .remove_new = sh_eth_drv_remove,
.id_table = sh_eth_id_table,
.driver = {
.name = CARDNAME,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index fb59ff94509a..e6e130dbe1de 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -169,13 +169,11 @@ err_out:
* Description: this function calls the main driver to free the net resources
* and then the platform hook to release the remaining resources (e.g. memory).
*/
-static int sxgbe_platform_remove(struct platform_device *pdev)
+static void sxgbe_platform_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
sxgbe_drv_remove(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -226,7 +224,7 @@ MODULE_DEVICE_TABLE(of, sxgbe_dt_ids);
static struct platform_driver sxgbe_platform_driver = {
.probe = sxgbe_platform_probe,
- .remove = sxgbe_platform_remove,
+ .remove_new = sxgbe_platform_remove,
.driver = {
.name = SXGBE_RESOURCE_NAME,
.pm = &sxgbe_platform_pm_ops,
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 96065dfc747b..76356dadf233 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -819,7 +819,7 @@ err_out:
return err;
}
-static int sgiseeq_remove(struct platform_device *pdev)
+static void sgiseeq_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sgiseeq_private *sp = netdev_priv(dev);
@@ -828,13 +828,11 @@ static int sgiseeq_remove(struct platform_device *pdev)
dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
sp->srings_dma, DMA_BIDIRECTIONAL);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver sgiseeq_driver = {
.probe = sgiseeq_probe,
- .remove = sgiseeq_remove,
+ .remove_new = sgiseeq_remove,
.driver = {
.name = "sgiseeq",
}
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 8d2d7ea2ebef..c9e17a8208a9 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -1260,7 +1260,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
- xdp_do_flush_map();
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index f54200f03e15..b04fdbb8aece 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -108,11 +108,17 @@
#define PTP_MIN_LENGTH 63
#define PTP_ADDR_IPV4 0xe0000181 /* 224.0.1.129 */
-#define PTP_ADDR_IPV6 {0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
- 0, 0x01, 0x81} /* ff0e::181 */
+
+/* ff0e::181 */
+static const struct in6_addr ptp_addr_ipv6 = { { {
+ 0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0x81 } } };
+
+/* 01-1B-19-00-00-00 */
+static const u8 ptp_addr_ether[ETH_ALEN] __aligned(2) = {
+ 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 };
+
#define PTP_EVENT_PORT 319
#define PTP_GENERAL_PORT 320
-#define PTP_ADDR_ETHER {0x01, 0x1b, 0x19, 0, 0, 0} /* 01-1B-19-00-00-00 */
/* Annoyingly, the formats of the version numbers differ between
 * versions 1 and 2, so it isn't possible to simply look for 1 or 2.
@@ -1296,7 +1302,7 @@ static int efx_ptp_insert_ipv4_filter(struct efx_nic *efx,
static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx,
struct list_head *filter_list,
- struct in6_addr *addr, u16 port,
+ const struct in6_addr *addr, u16 port,
unsigned long expiry)
{
struct efx_filter_spec spec;
@@ -1309,11 +1315,10 @@ static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx,
static int efx_ptp_insert_eth_multicast_filter(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- const u8 addr[ETH_ALEN] = PTP_ADDR_ETHER;
struct efx_filter_spec spec;
efx_ptp_init_filter(efx, &spec);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, ptp_addr_ether);
spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
spec.ether_type = htons(ETH_P_1588);
return efx_ptp_insert_filter(efx, &ptp->rxfilters_mcast, &spec, 0);
@@ -1346,15 +1351,13 @@ static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
* PTP over IPv6 and Ethernet
*/
if (efx_ptp_use_mac_tx_timestamps(efx)) {
- struct in6_addr ipv6_addr = {{PTP_ADDR_IPV6}};
-
rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast,
- &ipv6_addr, PTP_EVENT_PORT, 0);
+ &ptp_addr_ipv6, PTP_EVENT_PORT, 0);
if (rc < 0)
goto fail;
rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast,
- &ipv6_addr, PTP_GENERAL_PORT, 0);
+ &ptp_addr_ipv6, PTP_GENERAL_PORT, 0);
if (rc < 0)
goto fail;
@@ -1379,9 +1382,7 @@ static bool efx_ptp_valid_unicast_event_pkt(struct sk_buff *skb)
ip_hdr(skb)->protocol == IPPROTO_UDP &&
udp_hdr(skb)->source == htons(PTP_EVENT_PORT);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- struct in6_addr mcast_addr = {{PTP_ADDR_IPV6}};
-
- return !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &mcast_addr) &&
+ return !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &ptp_addr_ipv6) &&
ipv6_hdr(skb)->nexthdr == IPPROTO_UDP &&
udp_hdr(skb)->source == htons(PTP_EVENT_PORT);
}
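Promoting the PTP addresses from macro initializers to static const objects means every call site reuses one read-only copy instead of rebuilding an in6_addr on the stack, and the __aligned(2) on the MAC address satisfies the 16-bit alignment that the Ethernet address helpers rely on. The same pattern, sketched with hypothetical values:

	/* Hypothetical multicast MAC; ether_addr_equal() and friends use
	 * u16 accesses, hence the explicit 2-byte alignment.
	 */
	static const u8 example_mcast_mac[ETH_ALEN] __aligned(2) = {
		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01
	};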
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 1776f7f8a7a9..a7346e965bfe 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -1285,7 +1285,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
- xdp_do_flush_map();
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 8fc3f5272fa7..98d0b561a057 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -962,7 +962,7 @@ out_free:
return err;
}
-static int ioc3eth_remove(struct platform_device *pdev)
+static void ioc3eth_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
@@ -973,8 +973,6 @@ static int ioc3eth_remove(struct platform_device *pdev)
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
free_netdev(dev);
-
- return 0;
}
@@ -1275,7 +1273,7 @@ static void ioc3_set_multicast_list(struct net_device *dev)
static struct platform_driver ioc3eth_driver = {
.probe = ioc3eth_probe,
- .remove = ioc3eth_remove,
+ .remove_new = ioc3eth_remove,
.driver = {
.name = "ioc3-eth",
}
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 6d850ea2b94c..18b6f93d875e 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -854,19 +854,17 @@ static int meth_probe(struct platform_device *pdev)
return 0;
}
-static int meth_remove(struct platform_device *pdev)
+static void meth_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver meth_driver = {
.probe = meth_probe,
- .remove = meth_remove,
+ .remove_new = meth_remove,
.driver = {
.name = "meth",
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 032eccf8eb42..758347616535 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2411,7 +2411,7 @@ static int smc_drv_probe(struct platform_device *pdev)
return ret;
}
-static int smc_drv_remove(struct platform_device *pdev)
+static void smc_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct smc_local *lp = netdev_priv(ndev);
@@ -2436,8 +2436,6 @@ static int smc_drv_remove(struct platform_device *pdev)
release_mem_region(res->start, SMC_IO_EXTENT);
free_netdev(ndev);
-
- return 0;
}
static int smc_drv_suspend(struct device *dev)
@@ -2480,7 +2478,7 @@ static const struct dev_pm_ops smc_drv_pm_ops = {
static struct platform_driver smc_driver = {
.probe = smc_drv_probe,
- .remove = smc_drv_remove,
+ .remove_new = smc_drv_remove,
.driver = {
.name = CARDNAME,
.pm = &smc_drv_pm_ops,
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index cb590db625e8..31cb7d0166f0 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2314,7 +2314,7 @@ static int smsc911x_init(struct net_device *dev)
return 0;
}
-static int smsc911x_drv_remove(struct platform_device *pdev)
+static void smsc911x_drv_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct smsc911x_data *pdata;
@@ -2348,8 +2348,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
free_netdev(dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
/* standard register access */
@@ -2668,7 +2666,7 @@ MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match);
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
- .remove = smsc911x_drv_remove,
+ .remove_new = smsc911x_drv_remove,
.driver = {
.name = SMSC_CHIPNAME,
.pm = SMSC911X_PM_OPS,
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index f358ea003193..0891e9e49ecb 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -780,7 +780,7 @@ static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
u16 pkts)
{
if (xdp_res & NETSEC_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & NETSEC_XDP_TX)
netsec_xdp_ring_tx_db(priv, pkts);
@@ -2150,7 +2150,7 @@ free_ndev:
return ret;
}
-static int netsec_remove(struct platform_device *pdev)
+static void netsec_remove(struct platform_device *pdev)
{
struct netsec_priv *priv = platform_get_drvdata(pdev);
@@ -2162,8 +2162,6 @@ static int netsec_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
free_netdev(priv->ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -2211,7 +2209,7 @@ MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
static struct platform_driver netsec_driver = {
.probe = netsec_probe,
- .remove = netsec_remove,
+ .remove_new = netsec_remove,
.driver = {
.name = "netsec",
.pm = &netsec_pm_ops,
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 4838d2383a43..eed24e67c5a6 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1719,7 +1719,7 @@ out_del_napi:
return ret;
}
-static int ave_remove(struct platform_device *pdev)
+static void ave_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ave_private *priv = netdev_priv(ndev);
@@ -1727,8 +1727,6 @@ static int ave_remove(struct platform_device *pdev)
unregister_netdev(ndev);
netif_napi_del(&priv->napi_rx);
netif_napi_del(&priv->napi_tx);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1976,7 +1974,7 @@ MODULE_DEVICE_TABLE(of, of_ave_match);
static struct platform_driver ave_driver = {
.probe = ave_probe,
- .remove = ave_remove,
+ .remove_new = ave_remove,
.driver = {
.name = "ave",
.pm = AVE_PM_OPS,
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 06c6871f8788..a2b9e289aa36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -239,6 +239,17 @@ config DWMAC_INTEL_PLAT
the stmmac device driver. This driver is used for the Intel Keem Bay
SoC.
+config DWMAC_LOONGSON1
+ tristate "Loongson1 GMAC support"
+ default MACH_LOONGSON32
+ depends on OF && (MACH_LOONGSON32 || COMPILE_TEST)
+ help
+	  Support for the Ethernet controller on the Loongson1 SoC.
+
+	  This selects the Loongson1 SoC glue layer support for the stmmac
+	  device driver. It is used on Loongson1-based boards such as the
+	  Loongson LS1B and LS1C.
+
config DWMAC_TEGRA
tristate "NVIDIA Tegra MGBE support"
depends on ARCH_TEGRA || COMPILE_TEST
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 5b57aee19267..80e598bd4255 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
+obj-$(CONFIG_DWMAC_LOONGSON1) += dwmac-loongson1.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
obj-$(CONFIG_DWMAC_TEGRA) += dwmac-tegra.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 58a7f08e8d78..643ee6d8d4dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -115,7 +115,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(gmac))
return PTR_ERR(gmac);
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -124,13 +124,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
anarion_gmac_init(pdev, gmac);
plat_dat->bsp_priv = gmac;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret) {
- stmmac_remove_config_dt(pdev, plat_dat);
- return ret;
- }
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
static const struct of_device_id anarion_dwmac_match[] = {
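The anarion conversion above shows the pattern repeated throughout the stmmac glue drivers in this series: devm_stmmac_probe_config_dt() registers the DT config teardown with devres, so the stmmac_remove_config_dt() calls in error paths and .remove callbacks can simply be dropped. The resulting probe shape, sketched with hypothetical names:

	static int example_dwmac_probe(struct platform_device *pdev)
	{
		struct plat_stmmacenet_data *plat_dat;
		struct stmmac_resources res;
		int ret;

		ret = stmmac_get_platform_resources(pdev, &res);
		if (ret)
			return ret;

		plat_dat = devm_stmmac_probe_config_dt(pdev, res.mac);
		if (IS_ERR(plat_dat))
			return PTR_ERR(plat_dat);

		/* Plain returns suffice: devres frees the DT config
		 * automatically on failure or unbind.
		 */
		return stmmac_dvr_probe(&pdev->dev, plat_dat, &res);
	}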
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 61ebf36da13d..ec924c6c76c6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -435,15 +435,14 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(stmmac_res.addr))
return PTR_ERR(stmmac_res.addr);
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
-
- goto remove_config;
+ return ret;
}
ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
@@ -458,25 +457,17 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
remove:
data->remove(pdev);
-remove_config:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
static void dwc_eth_dwmac_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- const struct dwc_eth_dwmac_data *data;
-
- data = device_get_match_data(&pdev->dev);
+ const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
data->remove(pdev);
-
- stmmac_remove_config_dt(pdev, priv->plat);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index 20fc455b3337..598eff926815 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -27,7 +27,7 @@ static int dwmac_generic_probe(struct platform_device *pdev)
return ret;
if (pdev->dev.of_node) {
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat)) {
dev_err(&pdev->dev, "dt configuration failed\n");
return PTR_ERR(plat_dat);
@@ -46,17 +46,7 @@ static int dwmac_generic_probe(struct platform_device *pdev)
plat_dat->unicast_filter_entries = 1;
}
- ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
- if (ret)
- goto err_remove_config_dt;
-
- return 0;
-
-err_remove_config_dt:
- if (pdev->dev.of_node)
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id dwmac_generic_match[] = {
@@ -77,7 +67,6 @@ MODULE_DEVICE_TABLE(of, dwmac_generic_match);
static struct platform_driver dwmac_generic_driver = {
.probe = dwmac_generic_probe,
- .remove_new = stmmac_pltfr_remove,
.driver = {
.name = STMMAC_RESOURCE_NAME,
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index df34e34cc14f..8f730ada71f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -331,15 +331,14 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "failed to get match data\n");
- ret = -EINVAL;
- goto err_match_data;
+ return -EINVAL;
}
dwmac->ops = data;
@@ -348,7 +347,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
ret = imx_dwmac_parse_dt(dwmac, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to parse OF data\n");
- goto err_parse_dt;
+ return ret;
}
if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
@@ -365,7 +364,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
ret = imx_dwmac_clks_config(dwmac, true);
if (ret)
- goto err_clks_config;
+ return ret;
ret = imx_dwmac_init(pdev, dwmac);
if (ret)
@@ -385,10 +384,6 @@ err_drv_probe:
imx_dwmac_exit(pdev, plat_dat->bsp_priv);
err_dwmac_init:
imx_dwmac_clks_config(dwmac, false);
-err_clks_config:
-err_parse_dt:
-err_match_data:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 0a20c3d24722..19c93b998fb3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -241,29 +241,25 @@ static int ingenic_mac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
mac = devm_kzalloc(&pdev->dev, sizeof(*mac), GFP_KERNEL);
- if (!mac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!mac)
+ return -ENOMEM;
data = of_device_get_match_data(&pdev->dev);
if (!data) {
		dev_err(&pdev->dev, "No OF match data provided\n");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
/* Get MAC PHY control register */
mac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mode-reg");
if (IS_ERR(mac->regmap)) {
dev_err(&pdev->dev, "%s: Failed to get syscon regmap\n", __func__);
- ret = PTR_ERR(mac->regmap);
- goto err_remove_config_dt;
+ return PTR_ERR(mac->regmap);
}
if (!of_property_read_u32(pdev->dev.of_node, "tx-clk-delay-ps", &tx_delay_ps)) {
@@ -272,8 +268,7 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->tx_delay = tx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
}
@@ -283,8 +278,7 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->rx_delay = rx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
}
@@ -295,18 +289,9 @@ static int ingenic_mac_probe(struct platform_device *pdev)
ret = ingenic_mac_init(plat_dat);
if (ret)
- goto err_remove_config_dt;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_remove_config_dt;
-
- return 0;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
+ return ret;
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index d352a14f9d48..70edc5232379 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -85,17 +85,15 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat)) {
dev_err(&pdev->dev, "dt configuration failed\n");
return PTR_ERR(plat_dat);
}
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
dwmac->dev = &pdev->dev;
dwmac->tx_clk = NULL;
@@ -110,10 +108,8 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
/* Enable TX clock */
if (dwmac->data->tx_clk_en) {
dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
- if (IS_ERR(dwmac->tx_clk)) {
- ret = PTR_ERR(dwmac->tx_clk);
- goto err_remove_config_dt;
- }
+ if (IS_ERR(dwmac->tx_clk))
+ return PTR_ERR(dwmac->tx_clk);
clk_prepare_enable(dwmac->tx_clk);
@@ -126,7 +122,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set tx_clk\n");
- goto err_remove_config_dt;
+ return ret;
}
}
}
@@ -140,7 +136,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
- goto err_remove_config_dt;
+ return ret;
}
}
}
@@ -158,15 +154,10 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret) {
clk_disable_unprepare(dwmac->tx_clk);
- goto err_remove_config_dt;
+ return ret;
}
return 0;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
}
static void intel_eth_plat_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 9b0200749109..281687d7083b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -384,22 +384,20 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
if (val)
return val;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
- if (!gmac) {
- err = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!gmac)
+ return -ENOMEM;
gmac->pdev = pdev;
err = ipq806x_gmac_of_parse(gmac);
if (err) {
dev_err(dev, "device tree parsing error\n");
- goto err_remove_config_dt;
+ return err;
}
regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -459,11 +457,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
err = ipq806x_gmac_configure_qsgmii_params(gmac);
if (err)
- goto err_remove_config_dt;
+ return err;
err = ipq806x_gmac_configure_qsgmii_pcs_speed(gmac);
if (err)
- goto err_remove_config_dt;
+ return err;
}
plat_dat->has_gmac = true;
@@ -473,21 +471,12 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
plat_dat->tx_fifo_size = 8192;
plat_dat->rx_fifo_size = 8192;
- err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (err)
- goto err_remove_config_dt;
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
err_unsupported_phy:
dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
phy_modes(gmac->phy_mode));
- err = -EINVAL;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return err;
+ return -EINVAL;
}
static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
new file mode 100644
index 000000000000..3e86810717d3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Loongson-1 DWMAC glue layer
+ *
+ * Copyright (C) 2011-2023 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#define LS1B_GMAC0_BASE (0x1fe10000)
+#define LS1B_GMAC1_BASE (0x1fe20000)
+
+/* Loongson-1 SYSCON Registers */
+#define LS1X_SYSCON0 (0x0)
+#define LS1X_SYSCON1 (0x4)
+
+/* Loongson-1B SYSCON Register Bits */
+#define GMAC1_USE_UART1 BIT(4)
+#define GMAC1_USE_UART0 BIT(3)
+
+#define GMAC1_SHUT BIT(13)
+#define GMAC0_SHUT BIT(12)
+
+#define GMAC1_USE_TXCLK BIT(3)
+#define GMAC0_USE_TXCLK BIT(2)
+#define GMAC1_USE_PWM23 BIT(1)
+#define GMAC0_USE_PWM01 BIT(0)
+
+/* Loongson-1C SYSCON Register Bits */
+#define GMAC_SHUT BIT(6)
+
+#define PHY_INTF_SELI GENMASK(30, 28)
+#define PHY_INTF_MII FIELD_PREP(PHY_INTF_SELI, 0)
+#define PHY_INTF_RMII FIELD_PREP(PHY_INTF_SELI, 4)
+
+struct ls1x_dwmac {
+ struct plat_stmmacenet_data *plat_dat;
+ struct regmap *regmap;
+};
+
+static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+{
+ struct ls1x_dwmac *dwmac = priv;
+ struct plat_stmmacenet_data *plat = dwmac->plat_dat;
+ struct regmap *regmap = dwmac->regmap;
+ struct resource *res;
+ unsigned long reg_base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Could not get IO_MEM resources\n");
+ return -EINVAL;
+ }
+ reg_base = (unsigned long)res->start;
+
+ if (reg_base == LS1B_GMAC0_BASE) {
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ regmap_update_bits(regmap, LS1X_SYSCON0,
+ GMAC0_USE_TXCLK | GMAC0_USE_PWM01,
+ 0);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ regmap_update_bits(regmap, LS1X_SYSCON0,
+ GMAC0_USE_TXCLK | GMAC0_USE_PWM01,
+ GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ plat->phy_interface);
+ return -EOPNOTSUPP;
+ }
+
+ regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
+ } else if (reg_base == LS1B_GMAC1_BASE) {
+ regmap_update_bits(regmap, LS1X_SYSCON0,
+ GMAC1_USE_UART1 | GMAC1_USE_UART0,
+ GMAC1_USE_UART1 | GMAC1_USE_UART0);
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ regmap_update_bits(regmap, LS1X_SYSCON1,
+ GMAC1_USE_TXCLK | GMAC1_USE_PWM23,
+ 0);
+
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ regmap_update_bits(regmap, LS1X_SYSCON1,
+ GMAC1_USE_TXCLK | GMAC1_USE_PWM23,
+ GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ plat->phy_interface);
+ return -EOPNOTSUPP;
+ }
+
+ regmap_update_bits(regmap, LS1X_SYSCON1, GMAC1_SHUT, 0);
+ } else {
+		dev_err(&pdev->dev, "Invalid Ethernet MAC base address %lx\n",
+ reg_base);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+{
+ struct ls1x_dwmac *dwmac = priv;
+ struct plat_stmmacenet_data *plat = dwmac->plat_dat;
+ struct regmap *regmap = dwmac->regmap;
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
+ PHY_INTF_MII);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
+ PHY_INTF_RMII);
+ break;
+ default:
+		dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ plat->phy_interface);
+ return -EOPNOTSUPP;
+ }
+
+ regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
+
+ return 0;
+}
+
+static int ls1x_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct regmap *regmap;
+ struct ls1x_dwmac *dwmac;
+ int (*init)(struct platform_device *pdev, void *priv);
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ /* Probe syscon */
+ regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "loongson,ls1-syscon");
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "Unable to find syscon\n");
+
+ init = of_device_get_match_data(&pdev->dev);
+ if (!init) {
+		dev_err(&pdev->dev, "No OF match data provided\n");
+ return -EINVAL;
+ }
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->init = init;
+ dwmac->plat_dat = plat_dat;
+ dwmac->regmap = regmap;
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id ls1x_dwmac_match[] = {
+ {
+ .compatible = "loongson,ls1b-gmac",
+ .data = &ls1b_dwmac_syscon_init,
+ },
+ {
+ .compatible = "loongson,ls1c-emac",
+ .data = &ls1c_dwmac_syscon_init,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ls1x_dwmac_match);
+
+static struct platform_driver ls1x_dwmac_driver = {
+ .probe = ls1x_dwmac_probe,
+ .driver = {
+ .name = "loongson1-dwmac",
+ .of_match_table = ls1x_dwmac_match,
+ },
+};
+module_platform_driver(ls1x_dwmac_driver);
+
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson-1 DWMAC glue layer");
+MODULE_LICENSE("GPL");
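The new glue driver keys its per-SoC syscon setup off the OF match data: of_device_get_match_data() yields the init callback, which is stored in plat_dat->init and invoked by the stmmac platform layer during probe (and again on resume). A condensed sketch of that dispatch, with a hypothetical function name:

	static int ls1x_dispatch_example(struct platform_device *pdev, void *priv)
	{
		int (*init)(struct platform_device *pdev, void *priv);

		init = of_device_get_match_data(&pdev->dev);
		if (!init)
			return -EINVAL;

		/* ls1b_dwmac_syscon_init or ls1c_dwmac_syscon_init */
		return init(pdev, priv);
	}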
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index d0aa674ce705..4c810d8f5bea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -37,7 +37,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -46,8 +46,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
if (IS_ERR(reg)) {
dev_err(&pdev->dev, "syscon lookup failed\n");
- ret = PTR_ERR(reg);
- goto err_remove_config_dt;
+ return PTR_ERR(reg);
}
if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) {
@@ -56,23 +55,13 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
regmap_update_bits(reg, LPC18XX_CREG_CREG6,
LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_remove_config_dt;
-
- return 0;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
static const struct of_device_id lpc18xx_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index cd796ec04132..2a9132d6d743 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -656,7 +656,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -665,7 +665,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
ret = mediatek_dwmac_clks_config(priv_plat, true);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -675,8 +675,6 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
err_drv_probe:
mediatek_dwmac_clks_config(priv_plat, false);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 959f88c6da16..a16bfa9089ea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -52,35 +52,22 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
dwmac->reg = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(dwmac->reg)) {
- ret = PTR_ERR(dwmac->reg);
- goto err_remove_config_dt;
- }
+ if (IS_ERR(dwmac->reg))
+ return PTR_ERR(dwmac->reg);
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_remove_config_dt;
-
- return 0;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
static const struct of_device_id meson6_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 0b159dc0d5f6..b23944aa344e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -400,33 +400,27 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
dwmac->data = (const struct meson8b_dwmac_data *)
of_device_get_match_data(&pdev->dev);
- if (!dwmac->data) {
- ret = -EINVAL;
- goto err_remove_config_dt;
- }
+ if (!dwmac->data)
+ return -EINVAL;
dwmac->regs = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(dwmac->regs)) {
- ret = PTR_ERR(dwmac->regs);
- goto err_remove_config_dt;
- }
+ if (IS_ERR(dwmac->regs))
+ return PTR_ERR(dwmac->regs);
dwmac->dev = &pdev->dev;
ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
if (ret) {
dev_err(&pdev->dev, "missing phy-mode property\n");
- goto err_remove_config_dt;
+ return ret;
}
/* use 2ns as fallback since this value was previously hardcoded */
@@ -448,53 +442,40 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
if (dwmac->rx_delay_ps > 3000 || dwmac->rx_delay_ps % 200) {
dev_err(dwmac->dev,
"The RGMII RX delay range is 0..3000ps in 200ps steps");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
} else {
if (dwmac->rx_delay_ps != 0 && dwmac->rx_delay_ps != 2000) {
dev_err(dwmac->dev,
"The only allowed RGMII RX delays values are: 0ps, 2000ps");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
}
dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
"timing-adjustment");
- if (IS_ERR(dwmac->timing_adj_clk)) {
- ret = PTR_ERR(dwmac->timing_adj_clk);
- goto err_remove_config_dt;
- }
+ if (IS_ERR(dwmac->timing_adj_clk))
+ return PTR_ERR(dwmac->timing_adj_clk);
ret = meson8b_init_rgmii_delays(dwmac);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = dwmac->data->set_phy_mode(dwmac);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = meson8b_init_prg_eth(dwmac);
if (ret)
- goto err_remove_config_dt;
+ return ret;
plat_dat->bsp_priv = dwmac;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_remove_config_dt;
-
- return 0;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
static const struct meson8b_dwmac_data meson8b_dwmac_data = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index d920a50dd16c..382e8de1255d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1824,7 +1824,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -1836,18 +1836,16 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->fix_mac_speed = rk_fix_speed;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
- if (IS_ERR(plat_dat->bsp_priv)) {
- ret = PTR_ERR(plat_dat->bsp_priv);
- goto err_remove_config_dt;
- }
+ if (IS_ERR(plat_dat->bsp_priv))
+ return PTR_ERR(plat_dat->bsp_priv);
ret = rk_gmac_clk_init(plat_dat);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = rk_gmac_powerup(plat_dat->bsp_priv);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -1857,8 +1855,6 @@ static int rk_gmac_probe(struct platform_device *pdev)
err_gmac_powerdown:
rk_gmac_powerdown(plat_dat->bsp_priv);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 9bf102bbc6a0..ba2ce776bd4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -400,21 +400,19 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
if (IS_ERR(dwmac->stmmac_ocp_rst)) {
ret = PTR_ERR(dwmac->stmmac_ocp_rst);
dev_err(dev, "error getting reset control of ocp %d\n", ret);
- goto err_remove_config_dt;
+ return ret;
}
reset_control_deassert(dwmac->stmmac_ocp_rst);
@@ -422,7 +420,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
ret = socfpga_dwmac_parse_data(dwmac, dev);
if (ret) {
dev_err(dev, "Unable to parse OF data\n");
- goto err_remove_config_dt;
+ return ret;
}
dwmac->ops = ops;
@@ -431,7 +429,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ndev = platform_get_drvdata(pdev);
stpriv = netdev_priv(ndev);
@@ -492,8 +490,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
err_dvr_remove:
stmmac_dvr_remove(&pdev->dev);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 9289bb87c3e3..5d630affb4d1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -105,7 +105,7 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, err,
"failed to get resources\n");
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat),
"dt configuration failed\n");
@@ -141,13 +141,7 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
if (err)
return err;
- err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (err) {
- stmmac_remove_config_dt(pdev, plat_dat);
- return err;
- }
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
static const struct of_device_id starfive_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 0d653bbb931b..4445cddc4cbe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -273,20 +273,18 @@ static int sti_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
ret = sti_dwmac_parse_data(dwmac, pdev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
- goto err_remove_config_dt;
+ return ret;
}
dwmac->fix_retime_src = data->fix_retime_src;
@@ -296,7 +294,7 @@ static int sti_dwmac_probe(struct platform_device *pdev)
ret = clk_prepare_enable(dwmac->clk);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = sti_dwmac_set_mode(dwmac);
if (ret)
@@ -310,8 +308,6 @@ static int sti_dwmac_probe(struct platform_device *pdev)
disable_clk:
clk_disable_unprepare(dwmac->clk);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index a0e276783e65..d8d3c729f219 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -372,21 +372,18 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "no of match data provided\n");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
dwmac->ops = data;
@@ -395,14 +392,14 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
ret = stm32_dwmac_parse_data(dwmac, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
- goto err_remove_config_dt;
+ return ret;
}
plat_dat->bsp_priv = dwmac;
ret = stm32_dwmac_init(plat_dat);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -412,8 +409,6 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
err_clk_disable:
stm32_dwmac_clk_disable(dwmac);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 465ff1fd4785..137741b94122 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1224,7 +1224,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return -EINVAL;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -1244,7 +1244,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
if (ret)
- goto dwmac_deconfig;
+ return ret;
ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
@@ -1295,8 +1295,6 @@ dwmac_exit:
sun8i_dwmac_exit(pdev, gmac);
dwmac_syscon:
sun8i_dwmac_unset_syscon(gmac);
-dwmac_deconfig:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index beceeae579bf..2653a9f0958c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -108,36 +108,31 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
- if (!gmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!gmac)
+ return -ENOMEM;
ret = of_get_phy_mode(dev->of_node, &gmac->interface);
if (ret && ret != -ENODEV) {
dev_err(dev, "Can't get phy-mode\n");
- goto err_remove_config_dt;
+ return ret;
}
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
dev_err(dev, "could not get tx clock\n");
- ret = PTR_ERR(gmac->tx_clk);
- goto err_remove_config_dt;
+ return PTR_ERR(gmac->tx_clk);
}
/* Optional regulator for PHY */
gmac->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(gmac->regulator)) {
- if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto err_remove_config_dt;
- }
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_info(dev, "no regulator found\n");
gmac->regulator = NULL;
}
@@ -155,7 +150,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -165,8 +160,6 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
err_gmac_exit:
sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index e0f3cbd36852..362f85136c3e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -284,7 +284,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
if (err < 0)
goto disable_clks;
- plat = stmmac_probe_config_dt(pdev, res.mac);
+ plat = devm_stmmac_probe_config_dt(pdev, res.mac);
if (IS_ERR(plat)) {
err = PTR_ERR(plat);
goto disable_clks;
@@ -303,7 +303,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!plat->mdio_bus_data) {
err = -ENOMEM;
- goto remove;
+ goto disable_clks;
}
}
@@ -321,7 +321,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
500, 500 * 2000);
if (err < 0) {
dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n");
- goto remove;
+ goto disable_clks;
}
plat->serdes_powerup = mgbe_uphy_lane_bringup_serdes_up;
@@ -342,12 +342,10 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
err = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (err < 0)
- goto remove;
+ goto disable_clks;
return 0;
-remove:
- stmmac_remove_config_dt(pdev, plat);
disable_clks:
clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index 22d113fb8e09..a5a5cfa989c6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -220,15 +220,13 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto remove_config;
- }
+ if (!dwmac)
+ return -ENOMEM;
spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
@@ -238,7 +236,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
ret = visconti_eth_clock_probe(pdev, plat_dat);
if (ret)
- goto remove_config;
+ return ret;
visconti_eth_init_hw(pdev, plat_dat);
@@ -252,22 +250,14 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
remove:
visconti_eth_clock_remove(pdev);
-remove_config:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
static void visconti_eth_dwmac_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
stmmac_pltfr_remove(pdev);
-
visconti_eth_clock_remove(pdev);
-
- stmmac_remove_config_dt(pdev, priv->plat);
}
static const struct of_device_id visconti_eth_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ed1a5a31a491..bb1dbf4c9f6c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4407,6 +4407,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
WARN_ON(tx_q->tx_skbuff[first_entry]);
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+ /* DWMAC IPs can be synthesized to support tx coe only for a few tx
+ * queues. In that case, checksum offloading for those queues that don't
+ * support tx coe needs to fall back to software checksum calculation.
+ */
+ if (csum_insertion &&
+ priv->plat->tx_queues_cfg[queue].coe_unsupported) {
+ if (unlikely(skb_checksum_help(skb)))
+ goto dma_map_err;
+ csum_insertion = !csum_insertion;
+ }
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
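
The per-queue fallback above relies on skb_checksum_help(): for a CHECKSUM_PARTIAL skb it computes the checksum in software and downgrades ip_summed to CHECKSUM_NONE, after which the descriptor must not request hardware insertion. Queues are marked as lacking checksum offload via the new "snps,coe-unsupported" boolean parsed in stmmac_platform.c below. Condensed (a sketch, not the verbatim driver code):

    if (skb->ip_summed == CHECKSUM_PARTIAL &&
        priv->plat->tx_queues_cfg[queue].coe_unsupported) {
    	if (unlikely(skb_checksum_help(skb)))
    		goto dma_map_err;	/* drop path, as in stmmac_xmit() */
    	csum_insertion = false;		/* skb is CHECKSUM_NONE now */
    }
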
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 2f0678f15fb7..1ffde555da47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -276,6 +276,9 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->tx_queues_cfg[queue].use_prio = true;
}
+ plat->tx_queues_cfg[queue].coe_unsupported =
+ of_property_read_bool(q_node, "snps,coe-unsupported");
+
queue++;
}
if (queue != plat->tx_queues_to_use) {
@@ -385,6 +388,22 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
}
/**
+ * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
+ * @pdev: platform_device structure
+ * @plat: driver data platform structure
+ *
+ * Release resources claimed by stmmac_probe_config_dt().
+ */
+static void stmmac_remove_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
+ of_node_put(plat->phy_node);
+ of_node_put(plat->mdio_node);
+}
+
+/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
* @mac: MAC address to use
@@ -392,7 +411,7 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
* this function is to read the driver parameters from device-tree and
* set some private fields that will be used by the main at runtime.
*/
-struct plat_stmmacenet_data *
+static struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
{
struct device_node *np = pdev->dev.of_node;
@@ -662,43 +681,14 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return plat;
}
-
-/**
- * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
- * @pdev: platform_device structure
- * @plat: driver data platform structure
- *
- * Release resources claimed by stmmac_probe_config_dt().
- */
-void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
-{
- clk_disable_unprepare(plat->stmmac_clk);
- clk_disable_unprepare(plat->pclk);
- of_node_put(plat->phy_node);
- of_node_put(plat->mdio_node);
-}
#else
struct plat_stmmacenet_data *
-stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
-{
- return ERR_PTR(-EINVAL);
-}
-
-struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
{
return ERR_PTR(-EINVAL);
}
-
-void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
-{
-}
#endif /* CONFIG_OF */
-EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt);
-EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
@@ -807,7 +797,7 @@ static void devm_stmmac_pltfr_remove(void *data)
{
struct platform_device *pdev = data;
- stmmac_pltfr_remove_no_dt(pdev);
+ stmmac_pltfr_remove(pdev);
}
/**
@@ -834,12 +824,12 @@ int devm_stmmac_pltfr_probe(struct platform_device *pdev,
EXPORT_SYMBOL_GPL(devm_stmmac_pltfr_probe);
/**
- * stmmac_pltfr_remove_no_dt
+ * stmmac_pltfr_remove
* @pdev: pointer to the platform device
* Description: This undoes the effects of stmmac_pltfr_probe() by removing the
* driver and calling the platform's exit() callback.
*/
-void stmmac_pltfr_remove_no_dt(struct platform_device *pdev)
+void stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -848,23 +838,6 @@ void stmmac_pltfr_remove_no_dt(struct platform_device *pdev)
stmmac_dvr_remove(&pdev->dev);
stmmac_pltfr_exit(pdev, plat);
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_remove_no_dt);
-
-/**
- * stmmac_pltfr_remove
- * @pdev: platform device pointer
- * Description: this function calls the main to free the net resources
- * and calls the platforms hook and release the resources (e.g. mem).
- */
-void stmmac_pltfr_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct plat_stmmacenet_data *plat = priv->plat;
-
- stmmac_pltfr_remove_no_dt(pdev);
- stmmac_remove_config_dt(pdev, plat);
-}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index c5565b2a70ac..bb6fc7e59aed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -12,11 +12,7 @@
#include "stmmac.h"
struct plat_stmmacenet_data *
-stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
-struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
-void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat);
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res);
@@ -32,7 +28,6 @@ int stmmac_pltfr_probe(struct platform_device *pdev,
int devm_stmmac_pltfr_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res);
-void stmmac_pltfr_remove_no_dt(struct platform_device *pdev);
void stmmac_pltfr_remove(struct platform_device *pdev);
extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
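
With the DT teardown now devres-managed, the old two-step remove (stmmac_pltfr_remove() = stmmac_pltfr_remove_no_dt() + stmmac_remove_config_dt()) loses its second half, and the _no_dt name is dropped. A glue driver's remove path reduces to something like this sketch (names illustrative):

    static void foo_dwmac_remove(struct platform_device *pdev)
    {
    	stmmac_pltfr_remove(pdev);	/* unregister netdev + platform exit() */
    	foo_dwmac_clock_teardown(pdev);	/* only glue-owned resources remain */
    }
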
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 011d74087f86..21431f43e4c2 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -10132,7 +10132,7 @@ err_out:
return err;
}
-static int niu_of_remove(struct platform_device *op)
+static void niu_of_remove(struct platform_device *op)
{
struct net_device *dev = platform_get_drvdata(op);
@@ -10165,7 +10165,6 @@ static int niu_of_remove(struct platform_device *op)
free_netdev(dev);
}
- return 0;
}
static const struct of_device_id niu_match[] = {
@@ -10183,7 +10182,7 @@ static struct platform_driver niu_of_driver = {
.of_match_table = niu_match,
},
.probe = niu_of_probe,
- .remove = niu_of_remove,
+ .remove_new = niu_of_remove,
};
#endif /* CONFIG_SPARC64 */
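
The niu hunk above starts a run of mechanical platform-driver conversions: .remove callbacks returning int are switched to the void-returning .remove_new. The returned int never did anything useful, as the platform core merely warned on a non-zero value and unbound the device anyway, leaking whatever the callback skipped, so dropping the return type removes a footgun. The shape of the conversion (driver names illustrative):

    static void example_remove(struct platform_device *pdev)
    {
    	struct net_device *dev = platform_get_drvdata(pdev);

    	unregister_netdev(dev);
    	free_netdev(dev);
    	/* no return value: there is nothing useful to report here */
    }

    static struct platform_driver example_driver = {
    	.probe      = example_probe,
    	.remove_new = example_remove,	/* void-returning variant */
    	.driver     = {
    		.name = "example",
    	},
    };
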
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index cc34d92d2e3d..16c86b13c185 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1234,7 +1234,7 @@ static int bigmac_sbus_probe(struct platform_device *op)
return bigmac_ether_init(op, qec_op);
}
-static int bigmac_sbus_remove(struct platform_device *op)
+static void bigmac_sbus_remove(struct platform_device *op)
{
struct bigmac *bp = platform_get_drvdata(op);
struct device *parent = op->dev.parent;
@@ -1255,8 +1255,6 @@ static int bigmac_sbus_remove(struct platform_device *op)
bp->bblock_dvma);
free_netdev(net_dev);
-
- return 0;
}
static const struct of_device_id bigmac_sbus_match[] = {
@@ -1274,7 +1272,7 @@ static struct platform_driver bigmac_sbus_driver = {
.of_match_table = bigmac_sbus_match,
},
.probe = bigmac_sbus_probe,
- .remove = bigmac_sbus_remove,
+ .remove_new = bigmac_sbus_remove,
};
module_platform_driver(bigmac_sbus_driver);
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index b37360f44972..aedd13c94225 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -933,7 +933,7 @@ static int qec_sbus_probe(struct platform_device *op)
return qec_ether_init(op);
}
-static int qec_sbus_remove(struct platform_device *op)
+static void qec_sbus_remove(struct platform_device *op)
{
struct sunqe *qp = platform_get_drvdata(op);
struct net_device *net_dev = qp->dev;
@@ -948,8 +948,6 @@ static int qec_sbus_remove(struct platform_device *op)
qp->buffers, qp->buffers_dvma);
free_netdev(net_dev);
-
- return 0;
}
static const struct of_device_id qec_sbus_match[] = {
@@ -967,7 +965,7 @@ static struct platform_driver qec_sbus_driver = {
.of_match_table = qec_sbus_match,
},
.probe = qec_sbus_probe,
- .remove = qec_sbus_remove,
+ .remove_new = qec_sbus_remove,
};
static int __init qec_init(void)
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index c499a14314f1..391a1bc7f446 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -511,7 +511,7 @@ out_clk_disable:
return ret;
}
-static int spl2sw_remove(struct platform_device *pdev)
+static void spl2sw_remove(struct platform_device *pdev)
{
struct spl2sw_common *comm;
int i;
@@ -538,8 +538,6 @@ static int spl2sw_remove(struct platform_device *pdev)
spl2sw_mdio_remove(comm);
clk_disable_unprepare(comm->clk);
-
- return 0;
}
static const struct of_device_id spl2sw_of_match[] = {
@@ -551,7 +549,7 @@ MODULE_DEVICE_TABLE(of, spl2sw_of_match);
static struct platform_driver spl2sw_driver = {
.probe = spl2sw_probe,
- .remove = spl2sw_remove,
+ .remove_new = spl2sw_remove,
.driver = {
.name = "sp7021_emac",
.of_match_table = spl2sw_of_match,
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 80eeeb463c4f..bc9b5d18427d 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1151,14 +1151,12 @@ fail:
return rc;
}
-static int cpmac_remove(struct platform_device *pdev)
+static void cpmac_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver cpmac_driver = {
@@ -1166,7 +1164,7 @@ static struct platform_driver cpmac_driver = {
.name = "cpmac",
},
.probe = cpmac_probe,
- .remove = cpmac_remove,
+ .remove_new = cpmac_remove,
};
int __init cpmac_init(void)
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 0ec85635dfd6..764ed298b570 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1360,7 +1360,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
* particular hardware is sharing a common queue, so the
* incoming device might change per packet.
*/
- xdp_do_flush_map();
+ xdp_do_flush();
break;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);
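
xdp_do_flush_map() is the legacy spelling of xdp_do_flush(): the flush drains the per-CPU bulk queues of every redirect map type (devmap, cpumap, xskmap), not just DEVMAP, so the tree is converging on the generic name. The usage pattern, as in the cpsw hunk above where the shared queue forces a flush per packet (a sketch; most drivers flush once per NAPI poll):

    case XDP_REDIRECT:
    	if (xdp_do_redirect(ndev, xdp, prog))
    		goto drop;
    	xdp_do_flush();		/* drain queued redirects */
    	break;
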
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2eb9d5a32588..5d756df133eb 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2002,7 +2002,7 @@ err_free_netdev:
* Called when removing the device driver. We disable clock usage and release
* the resources taken up by the driver and unregister network device
*/
-static int davinci_emac_remove(struct platform_device *pdev)
+static void davinci_emac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_priv *priv = netdev_priv(ndev);
@@ -2022,8 +2022,6 @@ static int davinci_emac_remove(struct platform_device *pdev)
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
free_netdev(ndev);
-
- return 0;
}
static int davinci_emac_suspend(struct device *dev)
@@ -2076,7 +2074,7 @@ static struct platform_driver davinci_emac_driver = {
.of_match_table = davinci_emac_of_match,
},
.probe = davinci_emac_probe,
- .remove = davinci_emac_remove,
+ .remove_new = davinci_emac_remove,
};
/**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 89b6d23e9937..628c87dc1d28 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -673,7 +673,7 @@ bail_out:
return ret;
}
-static int davinci_mdio_remove(struct platform_device *pdev)
+static void davinci_mdio_remove(struct platform_device *pdev)
{
struct davinci_mdio_data *data = platform_get_drvdata(pdev);
@@ -686,8 +686,6 @@ static int davinci_mdio_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -766,7 +764,7 @@ static struct platform_driver davinci_mdio_driver = {
.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
},
.probe = davinci_mdio_probe,
- .remove = davinci_mdio_remove,
+ .remove_new = davinci_mdio_remove,
};
static int __init davinci_mdio_init(void)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 933b84666574..c1da70f247d4 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -433,6 +433,17 @@ int emac_set_port_state(struct prueth_emac *emac,
return ret;
}
+void icssg_config_half_duplex(struct prueth_emac *emac)
+{
+ u32 val;
+
+ if (!emac->half_duplex)
+ return;
+
+ val = get_random_u32();
+ writel(val, emac->dram.va + HD_RAND_SEED_OFFSET);
+}
+
void icssg_config_set_speed(struct prueth_emac *emac)
{
u8 fw_speed;
@@ -453,5 +464,8 @@ void icssg_config_set_speed(struct prueth_emac *emac)
return;
}
+ if (emac->duplex == DUPLEX_HALF)
+ fw_speed |= FW_LINK_SPEED_HD;
+
writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
}
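
Half-duplex on ICSSG means the firmware runs CSMA/CD, so two pieces of state go to shared DRAM: a random seed for the binary exponential backoff (so colliding ports don't retry in lockstep) and an HD flag OR'ed into the link-speed word. Condensed from the hunks above (the 100M base value is purely illustrative):

    u8 fw_speed = FW_LINK_SPEED_100M;	/* illustrative base value */

    /* Seed the firmware's backoff generator */
    writel(get_random_u32(), emac->dram.va + HD_RAND_SEED_OFFSET);

    if (emac->duplex == DUPLEX_HALF)
    	fw_speed |= FW_LINK_SPEED_HD;	/* firmware enables CSMA/CD */
    writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
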
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 4914d0ef58e9..482ae82a99c1 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1029,6 +1029,8 @@ static void emac_adjust_link(struct net_device *ndev)
* values
*/
if (emac->link) {
+ if (emac->duplex == DUPLEX_HALF)
+ icssg_config_half_duplex(emac);
/* Set the RGMII cfg for gig en and full duplex */
icssg_update_rgmii_cfg(prueth->miig_rt, emac);
@@ -1147,9 +1149,13 @@ static int emac_phy_connect(struct prueth_emac *emac)
return -ENODEV;
}
+ if (!emac->half_duplex) {
+ dev_dbg(prueth->dev, "half duplex mode is not supported\n");
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ }
+
/* remove unsupported modes */
- phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
@@ -2113,6 +2119,10 @@ static int prueth_probe(struct platform_device *pdev)
eth0_node->name);
goto exit_iep;
}
+
+ if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
+ prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+
prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
@@ -2124,6 +2134,9 @@ static int prueth_probe(struct platform_device *pdev)
goto netdev_exit;
}
+ if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
+ prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+
prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
}
@@ -2313,8 +2326,13 @@ static const struct prueth_pdata am654_icssg_pdata = {
.quirk_10m_link_issue = 1,
};
+static const struct prueth_pdata am64x_icssg_pdata = {
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+};
+
static const struct of_device_id prueth_dt_match[] = {
{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
+ { .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 3fe80a8758d3..8b6d6b497010 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -145,6 +145,7 @@ struct prueth_emac {
struct icss_iep *iep;
unsigned int rx_ts_enabled : 1;
unsigned int tx_ts_enabled : 1;
+ unsigned int half_duplex : 1;
/* DMA related */
struct prueth_tx_chn tx_chns[PRUETH_MAX_TX_QUEUES];
@@ -271,6 +272,7 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac,
int emac_set_port_state(struct prueth_emac *emac,
enum icssg_port_state_cmd state);
void icssg_config_set_speed(struct prueth_emac *emac);
+void icssg_config_half_duplex(struct prueth_emac *emac);
/* Buffer queue helpers */
int icssg_queue_pop(struct prueth *prueth, u8 queue);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index d829113c16ee..11b90e1da0c6 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2228,7 +2228,7 @@ probe_quit:
return ret;
}
-static int netcp_remove(struct platform_device *pdev)
+static void netcp_remove(struct platform_device *pdev)
{
struct netcp_device *netcp_device = platform_get_drvdata(pdev);
struct netcp_intf *netcp_intf, *netcp_tmp;
@@ -2256,7 +2256,6 @@ static int netcp_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
platform_set_drvdata(pdev, NULL);
- return 0;
}
static const struct of_device_id of_match[] = {
@@ -2271,7 +2270,7 @@ static struct platform_driver netcp_driver = {
.of_match_table = of_match,
},
.probe = netcp_probe,
- .remove = netcp_remove,
+ .remove_new = netcp_remove,
};
module_platform_driver(netcp_driver);
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 50d7eacfec58..87e67121477c 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2332,7 +2332,7 @@ spider_net_alloc_card(void)
struct spider_net_card *card;
netdev = alloc_etherdev(struct_size(card, darray,
- tx_descriptors + rx_descriptors));
+ size_add(tx_descriptors, rx_descriptors)));
if (!netdev)
return NULL;
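
tx_descriptors and rx_descriptors are module parameters here, so their sum is user-controlled. size_add(), like struct_size(), saturates at SIZE_MAX instead of wrapping, and a saturated allocation request always fails, so an oversized parameter pair now produces a clean allocation failure rather than an undersized buffer. The idiom:

    /* size_add() returns SIZE_MAX on overflow instead of wrapping */
    netdev = alloc_etherdev(struct_size(card, darray,
    				    size_add(tx_descriptors, rx_descriptors)));
    if (!netdev)		/* a SIZE_MAX request can never succeed */
    	return NULL;
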
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index d09d352e1c0a..554aff7c8f3b 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1660,7 +1660,7 @@ static void tsi108_timed_checker(struct timer_list *t)
mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
}
-static int tsi108_ether_remove(struct platform_device *pdev)
+static void tsi108_ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct tsi108_prv_data *priv = netdev_priv(dev);
@@ -1670,15 +1670,13 @@ static int tsi108_ether_remove(struct platform_device *pdev)
iounmap(priv->regs);
iounmap(priv->phyregs);
free_netdev(dev);
-
- return 0;
}
/* Structure for a device driver */
static struct platform_driver tsi_eth_driver = {
.probe = tsi108_init_one,
- .remove = tsi108_ether_remove,
+ .remove_new = tsi108_ether_remove,
.driver = {
.name = "tsi-ethernet",
},
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 3e09e5036490..e80c02948801 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2443,7 +2443,7 @@ static void rhine_remove_one_pci(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int rhine_remove_one_platform(struct platform_device *pdev)
+static void rhine_remove_one_platform(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2453,8 +2453,6 @@ static int rhine_remove_one_platform(struct platform_device *pdev)
iounmap(rp->base);
free_netdev(dev);
-
- return 0;
}
static void rhine_shutdown_pci(struct pci_dev *pdev)
@@ -2572,7 +2570,7 @@ static struct pci_driver rhine_driver_pci = {
static struct platform_driver rhine_driver_platform = {
.probe = rhine_init_one_platform,
- .remove = rhine_remove_one_platform,
+ .remove_new = rhine_remove_one_platform,
.driver = {
.name = DRV_NAME,
.of_match_table = rhine_of_tbl,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 731f689412e6..1c6b2a9bba08 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2957,11 +2957,9 @@ static int velocity_platform_probe(struct platform_device *pdev)
return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
}
-static int velocity_platform_remove(struct platform_device *pdev)
+static void velocity_platform_remove(struct platform_device *pdev)
{
velocity_remove(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -3249,7 +3247,7 @@ static struct pci_driver velocity_pci_driver = {
static struct platform_driver velocity_platform_driver = {
.probe = velocity_platform_probe,
- .remove = velocity_platform_remove,
+ .remove_new = velocity_platform_remove,
.driver = {
.name = "via-velocity",
.of_match_table = velocity_of_ids,
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 85dc16faca54..f0063d569c80 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -12,6 +12,98 @@
#include "wx_lib.h"
#include "wx_hw.h"
+static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
+{
+ struct wx *wx = bus->priv;
+ u32 command, val;
+ int ret;
+
+ /* setup and write the address cycle command */
+ command = WX_MSCA_RA(regnum) |
+ WX_MSCA_PA(phy_addr) |
+ WX_MSCA_DA(devnum);
+ wr32(wx, WX_MSCA, command);
+
+ command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY;
+ if (wx->mac.type == wx_mac_em)
+ command |= WX_MDIO_CLK(6);
+ wr32(wx, WX_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
+ 100000, false, wx, WX_MSCC);
+ if (ret) {
+ wx_err(wx, "Mdio read c22 command did not complete.\n");
+ return ret;
+ }
+
+ return (u16)rd32(wx, WX_MSCC);
+}
+
+static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr,
+ int devnum, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+ u32 command, val;
+ int ret;
+
+ /* setup and write the address cycle command */
+ command = WX_MSCA_RA(regnum) |
+ WX_MSCA_PA(phy_addr) |
+ WX_MSCA_DA(devnum);
+ wr32(wx, WX_MSCA, command);
+
+ command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY;
+ if (wx->mac.type == wx_mac_em)
+ command |= WX_MDIO_CLK(6);
+ wr32(wx, WX_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
+ 100000, false, wx, WX_MSCC);
+ if (ret)
+ wx_err(wx, "Mdio write c22 command did not complete.\n");
+
+ return ret;
+}
+
+int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct wx *wx = bus->priv;
+
+ wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
+ return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum);
+}
+EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22);
+
+int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+
+ wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
+ return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value);
+}
+EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22);
+
+int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
+{
+ struct wx *wx = bus->priv;
+
+ wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
+ return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum);
+}
+EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45);
+
+int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
+ int devnum, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+
+ wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
+ return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value);
+}
+EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45);
+
static void wx_intr_disable(struct wx *wx, u64 qmask)
{
u32 mask;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 0b3447bc6f2f..48d3ccabc272 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -4,6 +4,13 @@
#ifndef _WX_HW_H_
#define _WX_HW_H_
+#include <linux/phy.h>
+
+int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum);
+int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value);
+int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum);
+int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
+ int devnum, int regnum, u16 value);
void wx_intr_enable(struct wx *wx, u64 qmask);
void wx_irq_disable(struct wx *wx);
int wx_check_flash_load(struct wx *wx, u32 check_bit);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index c5cbd177ef62..e3fc49284219 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -251,6 +251,7 @@ enum WX_MSCA_CMD_value {
#define WX_MSCC_SADDR BIT(18)
#define WX_MSCC_BUSY BIT(22)
#define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v)
+#define WX_MDIO_CLAUSE_SELECT 0x11220
#define WX_MMC_CONTROL 0x11800
#define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */
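
The three hunks above consolidate the MDIO accessors: the clause-select register moves from an ngbe-private define to the shared WX_MDIO_CLAUSE_SELECT, and the c22/c45 read/write pairs become library functions in libwx, with the EM-only MDIO clock divider applied conditionally (wx->mac.type == wx_mac_em) inside the shared helpers. Both consumers then just wire them up, as the following hunks show:

    /* ngbe (RGMII parts) and txgbe now share one implementation */
    mii_bus->read_c45  = wx_phy_read_reg_mdi_c45;
    mii_bus->write_c45 = wx_phy_write_reg_mdi_c45;
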
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index 591f5b7b6da6..6302ecca71bb 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -29,117 +29,6 @@ static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int re
return 0;
}
-static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum)
-{
- u32 command, val, device_type = 0;
- struct wx *wx = bus->priv;
- int ret;
-
- wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF);
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(device_type);
- wr32(wx, WX_MSCA, command);
- command = WX_MSCC_CMD(WX_MSCA_CMD_READ) |
- WX_MSCC_BUSY |
- WX_MDIO_CLK(6);
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret) {
- wx_err(wx, "Mdio read c22 command did not complete.\n");
- return ret;
- }
-
- return (u16)rd32(wx, WX_MSCC);
-}
-
-static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
-{
- u32 command, val, device_type = 0;
- struct wx *wx = bus->priv;
- int ret;
-
- wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF);
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(device_type);
- wr32(wx, WX_MSCA, command);
- command = value |
- WX_MSCC_CMD(WX_MSCA_CMD_WRITE) |
- WX_MSCC_BUSY |
- WX_MDIO_CLK(6);
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret)
- wx_err(wx, "Mdio write c22 command did not complete.\n");
-
- return ret;
-}
-
-static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
-{
- struct wx *wx = bus->priv;
- u32 val, command;
- int ret;
-
- wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0);
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(devnum);
- wr32(wx, WX_MSCA, command);
- command = WX_MSCC_CMD(WX_MSCA_CMD_READ) |
- WX_MSCC_BUSY |
- WX_MDIO_CLK(6);
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret) {
- wx_err(wx, "Mdio read c45 command did not complete.\n");
- return ret;
- }
-
- return (u16)rd32(wx, WX_MSCC);
-}
-
-static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
- int devnum, int regnum, u16 value)
-{
- struct wx *wx = bus->priv;
- int ret, command;
- u16 val;
-
- wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0);
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(devnum);
- wr32(wx, WX_MSCA, command);
- command = value |
- WX_MSCC_CMD(WX_MSCA_CMD_WRITE) |
- WX_MSCC_BUSY |
- WX_MDIO_CLK(6);
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret)
- wx_err(wx, "Mdio write c45 command did not complete.\n");
-
- return ret;
-}
-
static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum)
{
struct wx *wx = bus->priv;
@@ -148,7 +37,7 @@ static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum)
if (wx->mac_type == em_mac_type_mdi)
phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum);
else
- phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum);
+ phy_data = wx_phy_read_reg_mdi_c22(bus, phy_addr, regnum);
return phy_data;
}
@@ -162,7 +51,7 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr,
if (wx->mac_type == em_mac_type_mdi)
ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value);
else
- ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value);
+ ret = wx_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value);
return ret;
}
@@ -262,8 +151,8 @@ int ngbe_mdio_init(struct wx *wx)
mii_bus->priv = wx;
if (wx->mac_type == em_mac_type_rgmii) {
- mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45;
- mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45;
+ mii_bus->read_c45 = wx_phy_read_reg_mdi_c45;
+ mii_bus->write_c45 = wx_phy_write_reg_mdi_c45;
}
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev));
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index 72c8cd2d5575..ff754d69bdf6 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -59,9 +59,6 @@
#define NGBE_EEPROM_VERSION_L 0x1D
#define NGBE_EEPROM_VERSION_H 0x1E
-/* Media-dependent registers. */
-#define NGBE_MDIO_CLAUSE_SELECT 0x11220
-
/* GPIO Registers */
#define NGBE_GPIO_DR 0x14800
#define NGBE_GPIO_DDR 0x14804
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 4159c84035fd..b6c06adb8656 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -647,58 +647,6 @@ static int txgbe_sfp_register(struct txgbe *txgbe)
return 0;
}
-static int txgbe_phy_read(struct mii_bus *bus, int phy_addr,
- int devnum, int regnum)
-{
- struct wx *wx = bus->priv;
- u32 val, command;
- int ret;
-
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(devnum);
- wr32(wx, WX_MSCA, command);
-
- command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY;
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret) {
- wx_err(wx, "Mdio read c45 command did not complete.\n");
- return ret;
- }
-
- return (u16)rd32(wx, WX_MSCC);
-}
-
-static int txgbe_phy_write(struct mii_bus *bus, int phy_addr,
- int devnum, int regnum, u16 value)
-{
- struct wx *wx = bus->priv;
- int ret, command;
- u16 val;
-
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(devnum);
- wr32(wx, WX_MSCA, command);
-
- command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY;
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret)
- wx_err(wx, "Mdio write c45 command did not complete.\n");
-
- return ret;
-}
-
static int txgbe_ext_phy_init(struct txgbe *txgbe)
{
struct phy_device *phydev;
@@ -715,8 +663,8 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe)
return -ENOMEM;
mii_bus->name = "txgbe_mii_bus";
- mii_bus->read_c45 = &txgbe_phy_read;
- mii_bus->write_c45 = &txgbe_phy_write;
+ mii_bus->read_c45 = &wx_phy_read_reg_mdi_c45;
+ mii_bus->write_c45 = &wx_phy_write_reg_mdi_c45;
mii_bus->parent = &pdev->dev;
mii_bus->phy_mask = GENMASK(31, 1);
mii_bus->priv = wx;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 634946e87e5f..341ee2f249fd 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1062,11 +1062,9 @@ static int w5100_mmio_probe(struct platform_device *pdev)
mac_addr, irq, data ? data->link_gpio : -EINVAL);
}
-static int w5100_mmio_remove(struct platform_device *pdev)
+static void w5100_mmio_remove(struct platform_device *pdev)
{
w5100_remove(&pdev->dev);
-
- return 0;
}
void *w5100_ops_priv(const struct net_device *ndev)
@@ -1273,6 +1271,6 @@ static struct platform_driver w5100_mmio_driver = {
.pm = &w5100_pm_ops,
},
.probe = w5100_mmio_probe,
- .remove = w5100_mmio_remove,
+ .remove_new = w5100_mmio_remove,
};
module_platform_driver(w5100_mmio_driver);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index b0958fe8111e..3318b50a5911 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -627,7 +627,7 @@ err_register:
return err;
}
-static int w5300_remove(struct platform_device *pdev)
+static void w5300_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct w5300_priv *priv = netdev_priv(ndev);
@@ -639,7 +639,6 @@ static int w5300_remove(struct platform_device *pdev)
unregister_netdev(ndev);
free_netdev(ndev);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -683,7 +682,7 @@ static struct platform_driver w5300_driver = {
.pm = &w5300_pm_ops,
},
.probe = w5300_probe,
- .remove = w5300_remove,
+ .remove_new = w5300_remove,
};
module_platform_driver(w5300_driver);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1444b855e7aa..9df39cf8b097 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1626,7 +1626,7 @@ err_sysfs_create:
return rc;
}
-static int temac_remove(struct platform_device *pdev)
+static void temac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct temac_local *lp = netdev_priv(ndev);
@@ -1636,7 +1636,6 @@ static int temac_remove(struct platform_device *pdev)
if (lp->phy_node)
of_node_put(lp->phy_node);
temac_mdio_teardown(lp);
- return 0;
}
static const struct of_device_id temac_of_match[] = {
@@ -1650,7 +1649,7 @@ MODULE_DEVICE_TABLE(of, temac_of_match);
static struct platform_driver temac_driver = {
.probe = temac_probe,
- .remove = temac_remove,
+ .remove_new = temac_remove,
.driver = {
.name = "xilinx_temac",
.of_match_table = temac_of_match,
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b7ec4dafae90..82d0d44b2b02 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2183,7 +2183,7 @@ free_netdev:
return ret;
}
-static int axienet_remove(struct platform_device *pdev)
+static void axienet_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
@@ -2202,8 +2202,6 @@ static int axienet_remove(struct platform_device *pdev)
clk_disable_unprepare(lp->axi_clk);
free_netdev(ndev);
-
- return 0;
}
static void axienet_shutdown(struct platform_device *pdev)
@@ -2256,7 +2254,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
static struct platform_driver axienet_driver = {
.probe = axienet_probe,
- .remove = axienet_remove,
+ .remove_new = axienet_remove,
.shutdown = axienet_shutdown,
.driver = {
.name = "xilinx_axienet",
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index b358ecc67227..765aa516aada 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1180,10 +1180,8 @@ error:
* This function is called if a device is physically removed from the system or
* if the driver module is being unloaded. It frees any resources allocated to
* the device.
- *
- * Return: 0, always.
*/
-static int xemaclite_of_remove(struct platform_device *of_dev)
+static void xemaclite_of_remove(struct platform_device *of_dev)
{
struct net_device *ndev = platform_get_drvdata(of_dev);
@@ -1202,8 +1200,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
lp->phy_node = NULL;
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1262,7 +1258,7 @@ static struct platform_driver xemaclite_of_driver = {
.of_match_table = xemaclite_of_match,
},
.probe = xemaclite_of_probe,
- .remove = xemaclite_of_remove,
+ .remove_new = xemaclite_of_remove,
};
module_platform_driver(xemaclite_of_driver);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 3b0c5f177447..5c2fd2852e32 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -24,6 +24,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
@@ -1488,6 +1489,13 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
ndev->dev.dma_mask = dev->dma_mask;
ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
+ /* Maximum frame size is 16320 bytes and includes VLAN and
+ * ethernet headers. See "IXP400 Software Programmer's Guide"
+ * section 10.3.2, page 161.
+ */
+ ndev->min_mtu = ETH_MIN_MTU;
+ ndev->max_mtu = 16320 - VLAN_ETH_HLEN;
+
netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
if (!(port->npe = npe_request(NPE_ID(port->id))))
@@ -1533,7 +1541,7 @@ err_free_mem:
return err;
}
-static int ixp4xx_eth_remove(struct platform_device *pdev)
+static void ixp4xx_eth_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct phy_device *phydev = ndev->phydev;
@@ -1544,7 +1552,6 @@ static int ixp4xx_eth_remove(struct platform_device *pdev)
ixp4xx_mdio_remove();
npe_port_tab[NPE_ID(port->id)] = NULL;
npe_release(port->npe);
- return 0;
}
static const struct of_device_id ixp4xx_eth_of_match[] = {
@@ -1560,7 +1567,7 @@ static struct platform_driver ixp4xx_eth_driver = {
.of_match_table = of_match_ptr(ixp4xx_eth_of_match),
},
.probe = ixp4xx_eth_probe,
- .remove = ixp4xx_eth_remove,
+ .remove_new = ixp4xx_eth_remove,
};
module_platform_driver(ixp4xx_eth_driver);
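
The MTU arithmetic above, spelled out: the IXP400 hardware frame limit of 16320 bytes counts all link-layer headers, and VLAN_ETH_HLEN is ETH_HLEN (14) plus VLAN_HLEN (4), so the advertised maximum is 16320 - 18 = 16302 bytes of L3 payload. As a sketch (the frame-size define is illustrative; the driver uses the literal):

    #define IXP4XX_MAX_FRAME_SIZE	16320	/* hw limit, headers included */

    ndev->min_mtu = ETH_MIN_MTU;				/* 68 */
    ndev->max_mtu = IXP4XX_MAX_FRAME_SIZE - VLAN_ETH_HLEN;	/* 16302 */
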
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 144ec626230d..b3aa0c3d5826 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -630,7 +630,7 @@ static void __gtp_encap_destroy(struct sock *sk)
gtp->sk0 = NULL;
else
gtp->sk1u = NULL;
- udp_sk(sk)->encap_type = 0;
+ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
rcu_assign_sk_user_data(sk, NULL);
release_sock(sk);
sock_put(sk);
@@ -682,7 +682,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
- switch (udp_sk(sk)->encap_type) {
+ switch (READ_ONCE(udp_sk(sk)->encap_type)) {
case UDP_ENCAP_GTP0:
netdev_dbg(gtp->dev, "received GTP0 packet\n");
ret = gtp0_udp_encap_recv(gtp, skb);
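The gtp.c hunk pairs WRITE_ONCE() with READ_ONCE() so encap_type can be read by the receive path without holding the socket lock: both sides must be annotated, or the compiler is free to tear, fuse, or re-load the plain access. A minimal sketch of the pairing, using a stand-in struct rather than the real udp_sock:

#include <linux/compiler.h>

struct demo_sock {
	int encap_type;
};

/* writer side: runs under the socket lock */
static void demo_clear(struct demo_sock *s)
{
	WRITE_ONCE(s->encap_type, 0);
}

/* reader side: may run concurrently, lockless */
static int demo_read(struct demo_sock *s)
{
	return READ_ONCE(s->encap_type);
}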
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index a94c7bd5db2e..25b1f929c422 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -94,8 +94,8 @@ config BAYCOM_SER_FDX
driver, "BAYCOM ser12 half-duplex driver for AX.25" is the old
driver and still provided in case this driver does not work with
your serial interface chip. To configure the driver, use the sethdlc
- utility available in the standard ax25 utilities package. For
- information on the modems, see <http://www.baycom.de/> and
+ utility available in the standard ax25 utilities package.
+ For more information on the modems, see
<file:Documentation/networking/device_drivers/hamradio/baycom.rst>.
To compile this driver as a module, choose M here: the module
@@ -112,8 +112,7 @@ config BAYCOM_SER_HDX
still provided in case your serial interface chip does not work with
the full-duplex driver. This driver is deprecated. To configure
the driver, use the sethdlc utility available in the standard ax25
- utilities package. For information on the modems, see
- <http://www.baycom.de/> and
+ utilities package. For more information on the modems, see
<file:Documentation/networking/device_drivers/hamradio/baycom.rst>.
To compile this driver as a module, choose M here: the module
@@ -127,8 +126,8 @@ config BAYCOM_PAR
This is a driver for Baycom style simple amateur radio modems that
connect to a parallel interface. The driver supports the picpar and
par96 designs. To configure the driver, use the sethdlc utility
- available in the standard ax25 utilities package. For information on
- the modems, see <http://www.baycom.de/> and the file
+ available in the standard ax25 utilities package.
+ For more information on the modems, see
<file:Documentation/networking/device_drivers/hamradio/baycom.rst>.
To compile this driver as a module, choose M here: the module
@@ -142,8 +141,8 @@ config BAYCOM_EPP
This is a driver for Baycom style simple amateur radio modems that
connect to a parallel interface. The driver supports the EPP
designs. To configure the driver, use the sethdlc utility available
- in the standard ax25 utilities package. For information on the
- modems, see <http://www.baycom.de/> and the file
+ in the standard ax25 utilities package.
+ For more information on the modems, see
<file:Documentation/networking/device_drivers/hamradio/baycom.rst>.
To compile this driver as a module, choose M here: the module
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index 0eaa7a7f3343..e223886123ce 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -67,7 +67,7 @@ struct ipa_power {
spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
- struct icc_bulk_data interconnect[];
+ struct icc_bulk_data interconnect[] __counted_by(interconnect_count);
};
/* Initialize interconnects required for IPA operation */
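The __counted_by() annotation added to struct ipa_power ties the flexible array to the field holding its element count, so CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS can bounds-check accesses at run time. A sketch of the idiom with an illustrative struct (not the IPA one):

#include <linux/slab.h>

struct demo_buf {
	u32 count;
	u32 data[] __counted_by(count);
};

static struct demo_buf *demo_alloc(u32 n)
{
	struct demo_buf *b = kzalloc(struct_size(b, data, n), GFP_KERNEL);

	if (!b)
		return NULL;
	/* set the counter before any fortified access indexes data[] */
	b->count = n;
	return b;
}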
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index b7e151439c48..7a44e1cbe305 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3655,9 +3655,9 @@ static void macsec_get_stats64(struct net_device *dev,
dev_fetch_sw_netstats(s, dev->tstats);
- s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
- s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
- s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
+ s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
+ s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
+ s->rx_errors = DEV_STATS_READ(dev, rx_errors);
}
static int macsec_get_iflink(const struct net_device *dev)
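macsec here, and virtio_net further down in this series, stop touching dev->stats fields directly and go through the DEV_STATS_*() helpers, which wrap the counters in atomic_long operations so concurrent updaters and readers do not tear them. A sketch of the two sides of the helper pair, with hypothetical function names:

#include <linux/netdevice.h>

/* datapath side: a counter bump that may race with other CPUs */
static void demo_count_drop(struct net_device *dev)
{
	DEV_STATS_INC(dev, rx_dropped);
}

/* ndo_get_stats64 side: tear-free snapshot of the same counter */
static void demo_fill_stats(struct net_device *dev,
			    struct rtnl_link_stats64 *s)
{
	s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
}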
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index c727103c8b05..70edeeb7771e 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -177,15 +177,13 @@ static int aspeed_mdio_probe(struct platform_device *pdev)
return 0;
}
-static int aspeed_mdio_remove(struct platform_device *pdev)
+static void aspeed_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = (struct mii_bus *)platform_get_drvdata(pdev);
struct aspeed_mdio *ctx = bus->priv;
reset_control_assert(ctx->reset);
mdiobus_unregister(bus);
-
- return 0;
}
static const struct of_device_id aspeed_mdio_of_match[] = {
@@ -200,7 +198,7 @@ static struct platform_driver aspeed_mdio_driver = {
.of_match_table = aspeed_mdio_of_match,
},
.probe = aspeed_mdio_probe,
- .remove = aspeed_mdio_remove,
+ .remove_new = aspeed_mdio_remove,
};
module_platform_driver(aspeed_mdio_driver);
diff --git a/drivers/net/mdio/mdio-bcm-iproc.c b/drivers/net/mdio/mdio-bcm-iproc.c
index 77fc970cdfde..5a2d26c6afdc 100644
--- a/drivers/net/mdio/mdio-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-bcm-iproc.c
@@ -168,14 +168,12 @@ err_iproc_mdio:
return rc;
}
-static int iproc_mdio_remove(struct platform_device *pdev)
+static void iproc_mdio_remove(struct platform_device *pdev)
{
struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -210,7 +208,7 @@ static struct platform_driver iproc_mdio_driver = {
#endif
},
.probe = iproc_mdio_probe,
- .remove = iproc_mdio_remove,
+ .remove_new = iproc_mdio_remove,
};
module_platform_driver(iproc_mdio_driver);
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 6b26a0803696..e8cd8eef319b 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -296,15 +296,13 @@ out_clk_disable:
return ret;
}
-static int unimac_mdio_remove(struct platform_device *pdev)
+static void unimac_mdio_remove(struct platform_device *pdev)
{
struct unimac_mdio_priv *priv = platform_get_drvdata(pdev);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
static int __maybe_unused unimac_mdio_suspend(struct device *d)
@@ -353,7 +351,7 @@ static struct platform_driver unimac_mdio_driver = {
.pm = &unimac_mdio_pm_ops,
},
.probe = unimac_mdio_probe,
- .remove = unimac_mdio_remove,
+ .remove_new = unimac_mdio_remove,
};
module_platform_driver(unimac_mdio_driver);
diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c
index 0fb3c2de0845..897b88c50bbb 100644
--- a/drivers/net/mdio/mdio-gpio.c
+++ b/drivers/net/mdio/mdio-gpio.c
@@ -194,11 +194,9 @@ static int mdio_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int mdio_gpio_remove(struct platform_device *pdev)
+static void mdio_gpio_remove(struct platform_device *pdev)
{
mdio_gpio_bus_destroy(&pdev->dev);
-
- return 0;
}
static const struct of_device_id mdio_gpio_of_match[] = {
@@ -210,7 +208,7 @@ MODULE_DEVICE_TABLE(of, mdio_gpio_of_match);
static struct platform_driver mdio_gpio_driver = {
.probe = mdio_gpio_probe,
- .remove = mdio_gpio_remove,
+ .remove_new = mdio_gpio_remove,
.driver = {
.name = "mdio-gpio",
.of_match_table = mdio_gpio_of_match,
diff --git a/drivers/net/mdio/mdio-hisi-femac.c b/drivers/net/mdio/mdio-hisi-femac.c
index f231c2fbb1de..6703f626ee83 100644
--- a/drivers/net/mdio/mdio-hisi-femac.c
+++ b/drivers/net/mdio/mdio-hisi-femac.c
@@ -118,7 +118,7 @@ err_out_free_mdiobus:
return ret;
}
-static int hisi_femac_mdio_remove(struct platform_device *pdev)
+static void hisi_femac_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
struct hisi_femac_mdio_data *data = bus->priv;
@@ -126,8 +126,6 @@ static int hisi_femac_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(bus);
clk_disable_unprepare(data->clk);
mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id hisi_femac_mdio_dt_ids[] = {
@@ -138,7 +136,7 @@ MODULE_DEVICE_TABLE(of, hisi_femac_mdio_dt_ids);
static struct platform_driver hisi_femac_mdio_driver = {
.probe = hisi_femac_mdio_probe,
- .remove = hisi_femac_mdio_remove,
+ .remove_new = hisi_femac_mdio_remove,
.driver = {
.name = "hisi-femac-mdio",
.of_match_table = hisi_femac_mdio_dt_ids,
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 78b93de636f5..abd8b508ec16 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -278,13 +278,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
return 0;
}
-static int ipq4019_mdio_remove(struct platform_device *pdev)
+static void ipq4019_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus);
-
- return 0;
}
static const struct of_device_id ipq4019_mdio_dt_ids[] = {
@@ -296,7 +294,7 @@ MODULE_DEVICE_TABLE(of, ipq4019_mdio_dt_ids);
static struct platform_driver ipq4019_mdio_driver = {
.probe = ipq4019_mdio_probe,
- .remove = ipq4019_mdio_remove,
+ .remove_new = ipq4019_mdio_remove,
.driver = {
.name = "ipq4019-mdio",
.of_match_table = ipq4019_mdio_dt_ids,
diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index fd9716960106..f71b6e1c66e4 100644
--- a/drivers/net/mdio/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
@@ -147,14 +147,11 @@ ipq8064_mdio_probe(struct platform_device *pdev)
return 0;
}
-static int
-ipq8064_mdio_remove(struct platform_device *pdev)
+static void ipq8064_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus);
-
- return 0;
}
static const struct of_device_id ipq8064_mdio_dt_ids[] = {
@@ -165,7 +162,7 @@ MODULE_DEVICE_TABLE(of, ipq8064_mdio_dt_ids);
static struct platform_driver ipq8064_mdio_driver = {
.probe = ipq8064_mdio_probe,
- .remove = ipq8064_mdio_remove,
+ .remove_new = ipq8064_mdio_remove,
.driver = {
.name = "ipq8064-mdio",
.of_match_table = ipq8064_mdio_dt_ids,
diff --git a/drivers/net/mdio/mdio-moxart.c b/drivers/net/mdio/mdio-moxart.c
index f0cff584e176..d35af8cd7c4d 100644
--- a/drivers/net/mdio/mdio-moxart.c
+++ b/drivers/net/mdio/mdio-moxart.c
@@ -155,14 +155,12 @@ err_out_free_mdiobus:
return ret;
}
-static int moxart_mdio_remove(struct platform_device *pdev)
+static void moxart_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus);
mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id moxart_mdio_dt_ids[] = {
@@ -173,7 +171,7 @@ MODULE_DEVICE_TABLE(of, moxart_mdio_dt_ids);
static struct platform_driver moxart_mdio_driver = {
.probe = moxart_mdio_probe,
- .remove = moxart_mdio_remove,
+ .remove_new = moxart_mdio_remove,
.driver = {
.name = "moxart-mdio",
.of_match_table = moxart_mdio_dt_ids,
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 1a1b95ae95fa..c29377c85307 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -335,15 +335,13 @@ out_disable_clk:
return ret;
}
-static int mscc_miim_remove(struct platform_device *pdev)
+static void mscc_miim_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
struct mscc_miim_dev *miim = bus->priv;
clk_disable_unprepare(miim->clk);
mdiobus_unregister(bus);
-
- return 0;
}
static const struct mscc_miim_info mscc_ocelot_miim_info = {
@@ -371,7 +369,7 @@ MODULE_DEVICE_TABLE(of, mscc_miim_match);
static struct platform_driver mscc_miim_driver = {
.probe = mscc_miim_probe,
- .remove = mscc_miim_remove,
+ .remove_new = mscc_miim_remove,
.driver = {
.name = "mscc-miim",
.of_match_table = mscc_miim_match,
diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index 956d54846b62..a750bd4c77a0 100644
--- a/drivers/net/mdio/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
@@ -287,15 +287,13 @@ out_clk:
return rc;
}
-static int mdio_mux_iproc_remove(struct platform_device *pdev)
+static void mdio_mux_iproc_remove(struct platform_device *pdev)
{
struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
mdio_mux_uninit(md->mux_handle);
mdiobus_unregister(md->mii_bus);
clk_disable_unprepare(md->core_clk);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -342,7 +340,7 @@ static struct platform_driver mdiomux_iproc_driver = {
.pm = &mdio_mux_iproc_pm_ops,
},
.probe = mdio_mux_iproc_probe,
- .remove = mdio_mux_iproc_remove,
+ .remove_new = mdio_mux_iproc_remove,
};
module_platform_driver(mdiomux_iproc_driver);
diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c
index 8b444a8eb6b5..1b77e0e3e6e1 100644
--- a/drivers/net/mdio/mdio-mux-bcm6368.c
+++ b/drivers/net/mdio/mdio-mux-bcm6368.c
@@ -153,14 +153,12 @@ out_register:
return rc;
}
-static int bcm6368_mdiomux_remove(struct platform_device *pdev)
+static void bcm6368_mdiomux_remove(struct platform_device *pdev)
{
struct bcm6368_mdiomux_desc *md = platform_get_drvdata(pdev);
mdio_mux_uninit(md->mux_handle);
mdiobus_unregister(md->mii_bus);
-
- return 0;
}
static const struct of_device_id bcm6368_mdiomux_ids[] = {
@@ -175,7 +173,7 @@ static struct platform_driver bcm6368_mdiomux_driver = {
.of_match_table = bcm6368_mdiomux_ids,
},
.probe = bcm6368_mdiomux_probe,
- .remove = bcm6368_mdiomux_remove,
+ .remove_new = bcm6368_mdiomux_remove,
};
module_platform_driver(bcm6368_mdiomux_driver);
diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c
index 3c7f16f06b45..38fb031f8979 100644
--- a/drivers/net/mdio/mdio-mux-gpio.c
+++ b/drivers/net/mdio/mdio-mux-gpio.c
@@ -62,11 +62,10 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int mdio_mux_gpio_remove(struct platform_device *pdev)
+static void mdio_mux_gpio_remove(struct platform_device *pdev)
{
struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
mdio_mux_uninit(s->mux_handle);
- return 0;
}
static const struct of_device_id mdio_mux_gpio_match[] = {
@@ -87,7 +86,7 @@ static struct platform_driver mdio_mux_gpio_driver = {
.of_match_table = mdio_mux_gpio_match,
},
.probe = mdio_mux_gpio_probe,
- .remove = mdio_mux_gpio_remove,
+ .remove_new = mdio_mux_gpio_remove,
};
module_platform_driver(mdio_mux_gpio_driver);
diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index 910e5cf74e89..754b0f2cf15b 100644
--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -336,7 +336,7 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
return ret;
}
-static int g12a_mdio_mux_remove(struct platform_device *pdev)
+static void g12a_mdio_mux_remove(struct platform_device *pdev)
{
struct g12a_mdio_mux *priv = platform_get_drvdata(pdev);
@@ -344,13 +344,11 @@ static int g12a_mdio_mux_remove(struct platform_device *pdev)
if (__clk_is_enabled(priv->pll))
clk_disable_unprepare(priv->pll);
-
- return 0;
}
static struct platform_driver g12a_mdio_mux_driver = {
.probe = g12a_mdio_mux_probe,
- .remove = g12a_mdio_mux_remove,
+ .remove_new = g12a_mdio_mux_remove,
.driver = {
.name = "g12a-mdio_mux",
.of_match_table = g12a_mdio_mux_match,
diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c
index 76188575ca1f..89554021b5cc 100644
--- a/drivers/net/mdio/mdio-mux-meson-gxl.c
+++ b/drivers/net/mdio/mdio-mux-meson-gxl.c
@@ -140,18 +140,16 @@ static int gxl_mdio_mux_probe(struct platform_device *pdev)
return ret;
}
-static int gxl_mdio_mux_remove(struct platform_device *pdev)
+static void gxl_mdio_mux_remove(struct platform_device *pdev)
{
struct gxl_mdio_mux *priv = platform_get_drvdata(pdev);
mdio_mux_uninit(priv->mux_handle);
-
- return 0;
}
static struct platform_driver gxl_mdio_mux_driver = {
.probe = gxl_mdio_mux_probe,
- .remove = gxl_mdio_mux_remove,
+ .remove_new = gxl_mdio_mux_remove,
.driver = {
.name = "gxl-mdio-mux",
.of_match_table = gxl_mdio_mux_match,
diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index 09af150ed774..de08419d0c98 100644
--- a/drivers/net/mdio/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
@@ -169,13 +169,11 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
return 0;
}
-static int mdio_mux_mmioreg_remove(struct platform_device *pdev)
+static void mdio_mux_mmioreg_remove(struct platform_device *pdev)
{
struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);
mdio_mux_uninit(s->mux_handle);
-
- return 0;
}
static const struct of_device_id mdio_mux_mmioreg_match[] = {
@@ -192,7 +190,7 @@ static struct platform_driver mdio_mux_mmioreg_driver = {
.of_match_table = mdio_mux_mmioreg_match,
},
.probe = mdio_mux_mmioreg_probe,
- .remove = mdio_mux_mmioreg_remove,
+ .remove_new = mdio_mux_mmioreg_remove,
};
module_platform_driver(mdio_mux_mmioreg_driver);
diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c
index bfa5af577b0a..569b13383191 100644
--- a/drivers/net/mdio/mdio-mux-multiplexer.c
+++ b/drivers/net/mdio/mdio-mux-multiplexer.c
@@ -85,7 +85,7 @@ static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
return ret;
}
-static int mdio_mux_multiplexer_remove(struct platform_device *pdev)
+static void mdio_mux_multiplexer_remove(struct platform_device *pdev)
{
struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev);
@@ -93,8 +93,6 @@ static int mdio_mux_multiplexer_remove(struct platform_device *pdev)
if (s->do_deselect)
mux_control_deselect(s->muxc);
-
- return 0;
}
static const struct of_device_id mdio_mux_multiplexer_match[] = {
@@ -109,7 +107,7 @@ static struct platform_driver mdio_mux_multiplexer_driver = {
.of_match_table = mdio_mux_multiplexer_match,
},
.probe = mdio_mux_multiplexer_probe,
- .remove = mdio_mux_multiplexer_remove,
+ .remove_new = mdio_mux_multiplexer_remove,
};
module_platform_driver(mdio_mux_multiplexer_driver);
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index 7c65c547d377..037a38cfed56 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -78,7 +78,7 @@ fail_register:
return err;
}
-static int octeon_mdiobus_remove(struct platform_device *pdev)
+static void octeon_mdiobus_remove(struct platform_device *pdev)
{
struct cavium_mdiobus *bus;
union cvmx_smix_en smi_en;
@@ -88,7 +88,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
mdiobus_unregister(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
- return 0;
}
static const struct of_device_id octeon_mdiobus_match[] = {
@@ -105,7 +104,7 @@ static struct platform_driver octeon_mdiobus_driver = {
.of_match_table = octeon_mdiobus_match,
},
.probe = octeon_mdiobus_probe,
- .remove = octeon_mdiobus_remove,
+ .remove_new = octeon_mdiobus_remove,
};
module_platform_driver(octeon_mdiobus_driver);
diff --git a/drivers/net/mdio/mdio-sun4i.c b/drivers/net/mdio/mdio-sun4i.c
index f798de3276dc..4511bcc73b36 100644
--- a/drivers/net/mdio/mdio-sun4i.c
+++ b/drivers/net/mdio/mdio-sun4i.c
@@ -142,7 +142,7 @@ err_out_free_mdiobus:
return ret;
}
-static int sun4i_mdio_remove(struct platform_device *pdev)
+static void sun4i_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
struct sun4i_mdio_data *data = bus->priv;
@@ -151,8 +151,6 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
if (data->regulator)
regulator_disable(data->regulator);
mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id sun4i_mdio_dt_ids[] = {
@@ -166,7 +164,7 @@ MODULE_DEVICE_TABLE(of, sun4i_mdio_dt_ids);
static struct platform_driver sun4i_mdio_driver = {
.probe = sun4i_mdio_probe,
- .remove = sun4i_mdio_remove,
+ .remove_new = sun4i_mdio_remove,
.driver = {
.name = "sun4i-mdio",
.of_match_table = sun4i_mdio_dt_ids,
diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 1190a793555a..7909d7caf45c 100644
--- a/drivers/net/mdio/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -432,7 +432,7 @@ out_clk:
return ret;
}
-static int xgene_mdio_remove(struct platform_device *pdev)
+static void xgene_mdio_remove(struct platform_device *pdev)
{
struct xgene_mdio_pdata *pdata = platform_get_drvdata(pdev);
struct mii_bus *mdio_bus = pdata->mdio_bus;
@@ -443,8 +443,6 @@ static int xgene_mdio_remove(struct platform_device *pdev)
if (dev->of_node)
clk_disable_unprepare(pdata->clk);
-
- return 0;
}
static struct platform_driver xgene_mdio_driver = {
@@ -454,7 +452,7 @@ static struct platform_driver xgene_mdio_driver = {
.acpi_match_table = ACPI_PTR(xgene_mdio_acpi_match),
},
.probe = xgene_mdio_probe,
- .remove = xgene_mdio_remove,
+ .remove_new = xgene_mdio_remove,
};
module_platform_driver(xgene_mdio_driver);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 107880d13d21..421d2b62918f 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -69,9 +69,9 @@ config SFP
comment "MII PHY device drivers"
config AMD_PHY
- tristate "AMD PHYs"
+ tristate "AMD and Altima PHYs"
help
- Currently supports the am79c874
+ Currently supports the AMD am79c874 and Altima AC101L.
config MESON_GXL_PHY
tristate "Amlogic Meson GXL Internal PHY"
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 001bb6d8bfce..930b15fa6ce9 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -13,6 +13,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
+#define PHY_ID_AC101L 0x00225520
#define PHY_ID_AM79C874 0x0022561b
#define MII_AM79C_IR 17 /* Interrupt Status/Control Register */
@@ -87,19 +88,31 @@ static irqreturn_t am79c_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-static struct phy_driver am79c_driver[] = { {
- .phy_id = PHY_ID_AM79C874,
- .name = "AM79C874",
- .phy_id_mask = 0xfffffff0,
- /* PHY_BASIC_FEATURES */
- .config_init = am79c_config_init,
- .config_intr = am79c_config_intr,
- .handle_interrupt = am79c_handle_interrupt,
-} };
+static struct phy_driver am79c_drivers[] = {
+ {
+ .phy_id = PHY_ID_AM79C874,
+ .name = "AM79C874",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_BASIC_FEATURES */
+ .config_init = am79c_config_init,
+ .config_intr = am79c_config_intr,
+ .handle_interrupt = am79c_handle_interrupt,
+ },
+ {
+ .phy_id = PHY_ID_AC101L,
+ .name = "AC101L",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_BASIC_FEATURES */
+ .config_init = am79c_config_init,
+ .config_intr = am79c_config_intr,
+ .handle_interrupt = am79c_handle_interrupt,
+ },
+};
-module_phy_driver(am79c_driver);
+module_phy_driver(am79c_drivers);
static struct mdio_device_id __maybe_unused amd_tbl[] = {
+ { PHY_ID_AC101L, 0xfffffff0 },
{ PHY_ID_AM79C874, 0xfffffff0 },
{ }
};
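Both table entries reuse the am79c874 callbacks because the Altima AC101L is register-compatible; the 0xfffffff0 mask means the low revision nibble of the PHY ID is ignored when matching. A sketch of what such a mask comparison amounts to (demo helper, not the real matcher):

#include <linux/types.h>

/* an entry matches when all bits selected by the mask agree */
static bool demo_phy_id_match(u32 id, u32 entry_id, u32 mask)
{
	return (id & mask) == (entry_id & mask);
}

/* e.g. demo_phy_id_match(0x0022552a, 0x00225520, 0xfffffff0) is true */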
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index 0f1e617a26c9..eb74a8cf8df1 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -90,7 +90,7 @@ static void asix_ax88772a_link_change_notify(struct phy_device *phydev)
*/
if (phydev->state == PHY_NOLINK) {
phy_init_hw(phydev);
- phy_start_aneg(phydev);
+ _phy_start_aneg(phydev);
}
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index df54c137c5f5..a5fa077650e8 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -981,7 +981,7 @@ static int phy_check_link_status(struct phy_device *phydev)
* If the PHYCONTROL Layer is operating, we change the state to
* reflect the beginning of Auto-negotiation or forcing.
*/
-static int _phy_start_aneg(struct phy_device *phydev)
+int _phy_start_aneg(struct phy_device *phydev)
{
int err;
@@ -1002,6 +1002,7 @@ static int _phy_start_aneg(struct phy_device *phydev)
return err;
}
+EXPORT_SYMBOL(_phy_start_aneg);
/**
* phy_start_aneg - start auto-negotiation for this PHY device
@@ -1231,9 +1232,7 @@ static void phy_error_precise(struct phy_device *phydev,
const void *func, int err)
{
WARN(1, "%pS: returned: %d\n", func, err);
- mutex_lock(&phydev->lock);
phy_process_error(phydev);
- mutex_unlock(&phydev->lock);
}
/**
@@ -1355,6 +1354,113 @@ void phy_free_interrupt(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_free_interrupt);
+enum phy_state_work {
+ PHY_STATE_WORK_NONE,
+ PHY_STATE_WORK_ANEG,
+ PHY_STATE_WORK_SUSPEND,
+};
+
+static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
+{
+ enum phy_state_work state_work = PHY_STATE_WORK_NONE;
+ struct net_device *dev = phydev->attached_dev;
+ enum phy_state old_state = phydev->state;
+ const void *func = NULL;
+ bool finished = false;
+ int err = 0;
+
+ switch (phydev->state) {
+ case PHY_DOWN:
+ case PHY_READY:
+ break;
+ case PHY_UP:
+ state_work = PHY_STATE_WORK_ANEG;
+ break;
+ case PHY_NOLINK:
+ case PHY_RUNNING:
+ err = phy_check_link_status(phydev);
+ func = &phy_check_link_status;
+ break;
+ case PHY_CABLETEST:
+ err = phydev->drv->cable_test_get_status(phydev, &finished);
+ if (err) {
+ phy_abort_cable_test(phydev);
+ netif_testing_off(dev);
+ state_work = PHY_STATE_WORK_ANEG;
+ phydev->state = PHY_UP;
+ break;
+ }
+
+ if (finished) {
+ ethnl_cable_test_finished(phydev);
+ netif_testing_off(dev);
+ state_work = PHY_STATE_WORK_ANEG;
+ phydev->state = PHY_UP;
+ }
+ break;
+ case PHY_HALTED:
+ case PHY_ERROR:
+ if (phydev->link) {
+ phydev->link = 0;
+ phy_link_down(phydev);
+ }
+ state_work = PHY_STATE_WORK_SUSPEND;
+ break;
+ }
+
+ if (state_work == PHY_STATE_WORK_ANEG) {
+ err = _phy_start_aneg(phydev);
+ func = &_phy_start_aneg;
+ }
+
+ if (err == -ENODEV)
+ return state_work;
+
+ if (err < 0)
+ phy_error_precise(phydev, func, err);
+
+ phy_process_state_change(phydev, old_state);
+
+ /* Only re-schedule a PHY state machine change if we are polling the
+ * PHY; if PHY_MAC_INTERRUPT is set, then we will be moving
+ * between states from phy_mac_interrupt().
+ *
+ * In state PHY_HALTED the PHY gets suspended, so rescheduling the
+ * state machine would be pointless and possibly error prone when
+ * called from phy_disconnect() synchronously.
+ */
+ if (phy_polling_mode(phydev) && phy_is_started(phydev))
+ phy_queue_state_machine(phydev, PHY_STATE_TIME);
+
+ return state_work;
+}
+
+/* unlocked part of the PHY state machine */
+static void _phy_state_machine_post_work(struct phy_device *phydev,
+ enum phy_state_work state_work)
+{
+ if (state_work == PHY_STATE_WORK_SUSPEND)
+ phy_suspend(phydev);
+}
+
+/**
+ * phy_state_machine - Handle the state machine
+ * @work: work_struct that describes the work to be done
+ */
+void phy_state_machine(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct phy_device *phydev =
+ container_of(dwork, struct phy_device, state_queue);
+ enum phy_state_work state_work;
+
+ mutex_lock(&phydev->lock);
+ state_work = _phy_state_machine(phydev);
+ mutex_unlock(&phydev->lock);
+
+ _phy_state_machine_post_work(phydev, state_work);
+}
+
/**
* phy_stop - Bring down the PHY link, and stop checking the status
* @phydev: target phy_device struct
@@ -1362,6 +1468,7 @@ EXPORT_SYMBOL(phy_free_interrupt);
void phy_stop(struct phy_device *phydev)
{
struct net_device *dev = phydev->attached_dev;
+ enum phy_state_work state_work;
enum phy_state old_state;
if (!phy_is_started(phydev) && phydev->state != PHY_DOWN &&
@@ -1385,9 +1492,10 @@ void phy_stop(struct phy_device *phydev)
phydev->state = PHY_HALTED;
phy_process_state_change(phydev, old_state);
+ state_work = _phy_state_machine(phydev);
mutex_unlock(&phydev->lock);
- phy_state_machine(&phydev->state_queue.work);
+ _phy_state_machine_post_work(phydev, state_work);
phy_stop_machine(phydev);
/* Cannot call flush_scheduled_work() here as desired because
@@ -1432,97 +1540,6 @@ out:
EXPORT_SYMBOL(phy_start);
/**
- * phy_state_machine - Handle the state machine
- * @work: work_struct that describes the work to be done
- */
-void phy_state_machine(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct phy_device *phydev =
- container_of(dwork, struct phy_device, state_queue);
- struct net_device *dev = phydev->attached_dev;
- bool needs_aneg = false, do_suspend = false;
- enum phy_state old_state;
- const void *func = NULL;
- bool finished = false;
- int err = 0;
-
- mutex_lock(&phydev->lock);
-
- old_state = phydev->state;
-
- switch (phydev->state) {
- case PHY_DOWN:
- case PHY_READY:
- break;
- case PHY_UP:
- needs_aneg = true;
-
- break;
- case PHY_NOLINK:
- case PHY_RUNNING:
- err = phy_check_link_status(phydev);
- func = &phy_check_link_status;
- break;
- case PHY_CABLETEST:
- err = phydev->drv->cable_test_get_status(phydev, &finished);
- if (err) {
- phy_abort_cable_test(phydev);
- netif_testing_off(dev);
- needs_aneg = true;
- phydev->state = PHY_UP;
- break;
- }
-
- if (finished) {
- ethnl_cable_test_finished(phydev);
- netif_testing_off(dev);
- needs_aneg = true;
- phydev->state = PHY_UP;
- }
- break;
- case PHY_HALTED:
- case PHY_ERROR:
- if (phydev->link) {
- phydev->link = 0;
- phy_link_down(phydev);
- }
- do_suspend = true;
- break;
- }
-
- mutex_unlock(&phydev->lock);
-
- if (needs_aneg) {
- err = phy_start_aneg(phydev);
- func = &phy_start_aneg;
- } else if (do_suspend) {
- phy_suspend(phydev);
- }
-
- if (err == -ENODEV)
- return;
-
- if (err < 0)
- phy_error_precise(phydev, func, err);
-
- phy_process_state_change(phydev, old_state);
-
- /* Only re-schedule a PHY state machine change if we are polling the
- * PHY, if PHY_MAC_INTERRUPT is set, then we will be moving
- * between states from phy_mac_interrupt().
- *
- * In state PHY_HALTED the PHY gets suspended, so rescheduling the
- * state machine would be pointless and possibly error prone when
- * called from phy_disconnect() synchronously.
- */
- mutex_lock(&phydev->lock);
- if (phy_polling_mode(phydev) && phy_is_started(phydev))
- phy_queue_state_machine(phydev, PHY_STATE_TIME);
- mutex_unlock(&phydev->lock);
-}
-
-/**
* phy_mac_interrupt - MAC says the link has changed
* @phydev: phy_device struct with changed link
*
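The phy.c refactor splits the state machine into a locked core, _phy_state_machine(), which only records what follow-up work is needed, and an unlocked tail, _phy_state_machine_post_work(), which runs phy_suspend() after phydev->lock is dropped; that lets phy_stop() drive the machine synchronously without deadlocking on its own lock. A reduced sketch of the split (demo names, not the real PHY types):

#include <linux/mutex.h>

enum demo_work { DEMO_WORK_NONE, DEMO_WORK_SUSPEND };

static void demo_suspend(void) { }	/* stands in for phy_suspend() */

/* locked part: only decides what follow-up work is needed */
static enum demo_work demo_machine_locked(bool halted)
{
	return halted ? DEMO_WORK_SUSPEND : DEMO_WORK_NONE;
}

/* unlocked part: side effects that must not run under the lock */
static void demo_machine_post(enum demo_work work)
{
	if (work == DEMO_WORK_SUSPEND)
		demo_suspend();
}

static void demo_run(struct mutex *lock, bool halted)
{
	enum demo_work work;

	mutex_lock(lock);
	work = demo_machine_locked(halted);
	mutex_unlock(lock);

	demo_machine_post(work);
}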
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 4ecfac227865..3679a43f4eb0 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -452,6 +452,11 @@ static const struct sfp_quirk sfp_quirks[] = {
// Rollball protocol to talk to the PHY.
SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+ // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but reports 1.2GBd
+ // NRZ in its EEPROM
+ SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
+ sfp_fixup_ignore_tx_fault),
+
SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
@@ -463,6 +468,9 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
sfp_fixup_ignore_tx_fault),
+ // FS 2.5G Base-T
+ SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
+
// Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
// 2500MBd NRZ in their EEPROM
SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index ba8b6bd8233c..8e7238e97d0a 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -877,7 +877,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
skb->dev = dev;
- skb->priority = sk->sk_priority;
+ skb->priority = READ_ONCE(sk->sk_priority);
skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
ph = skb_put(skb, total_len + sizeof(struct pppoe_hdr));
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0c13d9950cd8..1231bf365796 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2449,7 +2449,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
}
}
- if (list_empty(&tp->rx_done))
+ if (list_empty(&tp->rx_done) || work_done >= budget)
goto out1;
clear_bit(RX_EPROTO, &tp->flags);
@@ -2465,6 +2465,15 @@ static int rx_bottom(struct r8152 *tp, int budget)
struct urb *urb;
u8 *rx_data;
+ /* A bulk USB transfer may contain many packets, so the
+ * total number of packets may exceed the budget. Deal with
+ * all packets in the current bulk transfer, and stop handling
+ * further bulk transfers until the next schedule if the
+ * budget is exhausted.
+ */
+ if (work_done >= budget)
+ break;
+
list_del_init(cursor);
agg = list_entry(cursor, struct rx_agg, list);
@@ -2481,12 +2490,11 @@ static int rx_bottom(struct r8152 *tp, int budget)
while (urb->actual_length > len_used) {
struct net_device *netdev = tp->netdev;
struct net_device_stats *stats = &netdev->stats;
- unsigned int pkt_len, rx_frag_head_sz;
+ unsigned int pkt_len, rx_frag_head_sz, len;
struct sk_buff *skb;
+ bool use_frags;
- /* limit the skb numbers for rx_queue */
- if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
- break;
+ WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);
pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
if (pkt_len < ETH_ZLEN)
@@ -2497,45 +2505,77 @@ static int rx_bottom(struct r8152 *tp, int budget)
break;
pkt_len -= ETH_FCS_LEN;
+ len = pkt_len;
rx_data += sizeof(struct rx_desc);
- if (!agg_free || tp->rx_copybreak > pkt_len)
- rx_frag_head_sz = pkt_len;
+ if (!agg_free || tp->rx_copybreak > len)
+ use_frags = false;
else
- rx_frag_head_sz = tp->rx_copybreak;
+ use_frags = true;
+
+ if (use_frags) {
+ /* If the budget is exhausted, the packet
+ * would be queued in the driver. That is,
+ * napi_gro_frags() wouldn't be called, so
+ * we couldn't use napi_get_frags().
+ */
+ if (work_done >= budget) {
+ rx_frag_head_sz = tp->rx_copybreak;
+ skb = napi_alloc_skb(napi,
+ rx_frag_head_sz);
+ } else {
+ rx_frag_head_sz = 0;
+ skb = napi_get_frags(napi);
+ }
+ } else {
+ rx_frag_head_sz = 0;
+ skb = napi_alloc_skb(napi, len);
+ }
- skb = napi_alloc_skb(napi, rx_frag_head_sz);
if (!skb) {
stats->rx_dropped++;
goto find_next_rx;
}
skb->ip_summed = r8152_rx_csum(tp, rx_desc);
- memcpy(skb->data, rx_data, rx_frag_head_sz);
- skb_put(skb, rx_frag_head_sz);
- pkt_len -= rx_frag_head_sz;
- rx_data += rx_frag_head_sz;
- if (pkt_len) {
+ rtl_rx_vlan_tag(rx_desc, skb);
+
+ if (use_frags) {
+ if (rx_frag_head_sz) {
+ memcpy(skb->data, rx_data,
+ rx_frag_head_sz);
+ skb_put(skb, rx_frag_head_sz);
+ len -= rx_frag_head_sz;
+ rx_data += rx_frag_head_sz;
+ skb->protocol = eth_type_trans(skb,
+ netdev);
+ }
+
skb_add_rx_frag(skb, 0, agg->page,
agg_offset(agg, rx_data),
- pkt_len,
- SKB_DATA_ALIGN(pkt_len));
+ len, SKB_DATA_ALIGN(len));
get_page(agg->page);
+ } else {
+ memcpy(skb->data, rx_data, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
}
- skb->protocol = eth_type_trans(skb, netdev);
- rtl_rx_vlan_tag(rx_desc, skb);
if (work_done < budget) {
+ if (use_frags)
+ napi_gro_frags(napi);
+ else
+ napi_gro_receive(napi, skb);
+
work_done++;
stats->rx_packets++;
- stats->rx_bytes += skb->len;
- napi_gro_receive(napi, skb);
+ stats->rx_bytes += pkt_len;
} else {
__skb_queue_tail(&tp->rx_queue, skb);
}
find_next_rx:
- rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN);
+ rx_data = rx_agg_align(rx_data + len + ETH_FCS_LEN);
rx_desc = (struct rx_desc *)rx_data;
len_used = agg_offset(agg, rx_data);
len_used += sizeof(struct rx_desc);
@@ -2564,9 +2604,10 @@ submit:
}
}
+ /* Splice the remaining list back to rx_done for the next schedule */
if (!list_empty(&rx_queue)) {
spin_lock_irqsave(&tp->rx_lock, flags);
- list_splice_tail(&rx_queue, &tp->rx_done);
+ list_splice(&rx_queue, &tp->rx_done);
spin_unlock_irqrestore(&tp->rx_lock, flags);
}
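The r8152 rework makes rx_bottom() respect the NAPI budget strictly: an skb obtained from napi_get_frags() may only be flushed with napi_gro_frags(), so once the budget is spent the driver must fall back to a normally allocated skb that can sit on rx_queue until the next poll. A sketch of that allocation choice (the helper name is invented):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* frags-based skbs can only be consumed by napi_gro_frags(), so once
 * the budget is exhausted, allocate a plain skb that can be queued.
 */
static struct sk_buff *demo_rx_skb(struct napi_struct *napi,
				   int work_done, int budget,
				   unsigned int copybreak)
{
	if (work_done >= budget)
		return napi_alloc_skb(napi, copybreak);
	return napi_get_frags(napi);
}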
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fe7f314d65c9..1ed6bcc4cbb1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1258,7 +1258,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
if (unlikely(len > GOOD_PACKET_LEN)) {
pr_debug("%s: rx error: len %u exceeds max size %d\n",
dev->name, len, GOOD_PACKET_LEN);
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err;
}
@@ -1323,7 +1323,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
break;
}
stats->bytes += len;
@@ -1432,7 +1432,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, *num_buf,
virtio16_to_cpu(vi->vdev, hdr->num_buffers));
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err;
}
@@ -1451,7 +1451,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
put_page(page);
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)(truesize - room));
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err;
}
@@ -1630,7 +1630,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(len > truesize - room)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)(truesize - room));
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err_skb;
}
@@ -1662,7 +1662,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
dev->name, num_buf,
virtio16_to_cpu(vi->vdev,
hdr->num_buffers));
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err_buf;
}
@@ -1676,7 +1676,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(len > truesize - room)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)(truesize - room));
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err_skb;
}
@@ -1763,7 +1763,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
virtnet_rq_free_unused_buf(rq->vq, buf);
return;
}
@@ -1803,7 +1803,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
frame_err:
- dev->stats.rx_frame_errors++;
+ DEV_STATS_INC(dev, rx_frame_errors);
dev_kfree_skb(skb);
}
@@ -2349,12 +2349,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This should not happen! */
if (unlikely(err)) {
- dev->stats.tx_fifo_errors++;
+ DEV_STATS_INC(dev, tx_fifo_errors);
if (net_ratelimit())
dev_warn(&dev->dev,
"Unexpected TXQ (%d) queue failure: %d\n",
qnum, err);
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2573,10 +2573,10 @@ static void virtnet_stats(struct net_device *dev,
tot->tx_errors += terrors;
}
- tot->tx_dropped = dev->stats.tx_dropped;
- tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
- tot->rx_length_errors = dev->stats.rx_length_errors;
- tot->rx_frame_errors = dev->stats.rx_frame_errors;
+ tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
+ tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
+ tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
+ tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
}
static void virtnet_ack_link_announce(struct virtnet_info *vi)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index fc7ace638ce8..e7d8e03f826f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -591,7 +591,7 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
- struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
+ struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
struct mt76_txwi_cache *t = NULL;
struct mt7915_dev *dev;
struct mt76_queue *q;
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 284ab1f56391..87df60916960 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
+#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
@@ -395,7 +396,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
char buf[0x20];
int id;
- idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ idmap = bitmap_zalloc(max_ports, GFP_KERNEL);
if (!idmap)
return -ENOMEM;
@@ -414,7 +415,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
/* Allocate unique id */
id = find_first_zero_bit(idmap, max_ports);
- free_page((unsigned long)idmap);
+ bitmap_free(idmap);
snprintf(buf, sizeof(buf), fmt, id); /* Name generation */
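wwan_core replaces get_zeroed_page() with bitmap_zalloc(), sizing the id map to the number of bits actually needed rather than a whole page. A sketch of the allocate/scan/free idiom (demo function; max_ids stands in for max_ports):

#include <linux/bitmap.h>
#include <linux/gfp.h>

static int demo_alloc_id(unsigned int max_ids)
{
	unsigned long *idmap;
	unsigned int id;

	idmap = bitmap_zalloc(max_ids, GFP_KERNEL);
	if (!idmap)
		return -ENOMEM;

	/* callers would mark ids already in use here */
	id = find_first_zero_bit(idmap, max_ids);
	bitmap_free(idmap);

	return id < max_ids ? id : -ENOSPC;
}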
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index ed9d97a032f1..5dd5f188e14f 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -188,6 +188,7 @@ config PTP_1588_CLOCK_OCP
depends on COMMON_CLK
select NET_DEVLINK
select CRC16
+ select DPLL
help
This driver adds support for an OpenCompute time card.
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index a7a6947ab4bc..4021d3d325f9 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -23,6 +23,7 @@
#include <linux/mtd/mtd.h>
#include <linux/nvmem-consumer.h>
#include <linux/crc16.h>
+#include <linux/dpll.h>
#define PCI_VENDOR_ID_FACEBOOK 0x1d9b
#define PCI_DEVICE_ID_FACEBOOK_TIMECARD 0x0400
@@ -260,12 +261,21 @@ enum ptp_ocp_sma_mode {
SMA_MODE_OUT,
};
+static struct dpll_pin_frequency ptp_ocp_sma_freq[] = {
+ DPLL_PIN_FREQUENCY_1PPS,
+ DPLL_PIN_FREQUENCY_10MHZ,
+ DPLL_PIN_FREQUENCY_IRIG_B,
+ DPLL_PIN_FREQUENCY_DCF77,
+};
+
struct ptp_ocp_sma_connector {
enum ptp_ocp_sma_mode mode;
bool fixed_fcn;
bool fixed_dir;
bool disabled;
u8 default_fcn;
+ struct dpll_pin *dpll_pin;
+ struct dpll_pin_properties dpll_prop;
};
struct ocp_attr_group {
@@ -294,6 +304,7 @@ struct ptp_ocp_serial_port {
#define OCP_BOARD_ID_LEN 13
#define OCP_SERIAL_LEN 6
+#define OCP_SMA_NUM 4
struct ptp_ocp {
struct pci_dev *pdev;
@@ -331,7 +342,9 @@ struct ptp_ocp {
const struct attribute_group **attr_group;
const struct ptp_ocp_eeprom_map *eeprom_map;
struct dentry *debug_root;
+ bool sync;
time64_t gnss_lost;
+ struct delayed_work sync_work;
int id;
int n_irqs;
struct ptp_ocp_serial_port gnss_port;
@@ -350,8 +363,9 @@ struct ptp_ocp {
u32 ts_window_adjust;
u64 fw_cap;
struct ptp_ocp_signal signal[4];
- struct ptp_ocp_sma_connector sma[4];
+ struct ptp_ocp_sma_connector sma[OCP_SMA_NUM];
const struct ocp_sma_op *sma_op;
+ struct dpll_device *dpll;
};
#define OCP_REQ_TIMESTAMP BIT(0)
@@ -835,6 +849,7 @@ static DEFINE_IDR(ptp_ocp_idr);
struct ocp_selector {
const char *name;
int value;
+ u64 frequency;
};
static const struct ocp_selector ptp_ocp_clock[] = {
@@ -855,31 +870,31 @@ static const struct ocp_selector ptp_ocp_clock[] = {
#define SMA_SELECT_MASK GENMASK(14, 0)
static const struct ocp_selector ptp_ocp_sma_in[] = {
- { .name = "10Mhz", .value = 0x0000 },
- { .name = "PPS1", .value = 0x0001 },
- { .name = "PPS2", .value = 0x0002 },
- { .name = "TS1", .value = 0x0004 },
- { .name = "TS2", .value = 0x0008 },
- { .name = "IRIG", .value = 0x0010 },
- { .name = "DCF", .value = 0x0020 },
- { .name = "TS3", .value = 0x0040 },
- { .name = "TS4", .value = 0x0080 },
- { .name = "FREQ1", .value = 0x0100 },
- { .name = "FREQ2", .value = 0x0200 },
- { .name = "FREQ3", .value = 0x0400 },
- { .name = "FREQ4", .value = 0x0800 },
- { .name = "None", .value = SMA_DISABLE },
+ { .name = "10Mhz", .value = 0x0000, .frequency = 10000000 },
+ { .name = "PPS1", .value = 0x0001, .frequency = 1 },
+ { .name = "PPS2", .value = 0x0002, .frequency = 1 },
+ { .name = "TS1", .value = 0x0004, .frequency = 0 },
+ { .name = "TS2", .value = 0x0008, .frequency = 0 },
+ { .name = "IRIG", .value = 0x0010, .frequency = 10000 },
+ { .name = "DCF", .value = 0x0020, .frequency = 77500 },
+ { .name = "TS3", .value = 0x0040, .frequency = 0 },
+ { .name = "TS4", .value = 0x0080, .frequency = 0 },
+ { .name = "FREQ1", .value = 0x0100, .frequency = 0 },
+ { .name = "FREQ2", .value = 0x0200, .frequency = 0 },
+ { .name = "FREQ3", .value = 0x0400, .frequency = 0 },
+ { .name = "FREQ4", .value = 0x0800, .frequency = 0 },
+ { .name = "None", .value = SMA_DISABLE, .frequency = 0 },
{ }
};
static const struct ocp_selector ptp_ocp_sma_out[] = {
- { .name = "10Mhz", .value = 0x0000 },
- { .name = "PHC", .value = 0x0001 },
- { .name = "MAC", .value = 0x0002 },
- { .name = "GNSS1", .value = 0x0004 },
- { .name = "GNSS2", .value = 0x0008 },
- { .name = "IRIG", .value = 0x0010 },
- { .name = "DCF", .value = 0x0020 },
+ { .name = "10Mhz", .value = 0x0000, .frequency = 10000000 },
+ { .name = "PHC", .value = 0x0001, .frequency = 1 },
+ { .name = "MAC", .value = 0x0002, .frequency = 1 },
+ { .name = "GNSS1", .value = 0x0004, .frequency = 1 },
+ { .name = "GNSS2", .value = 0x0008, .frequency = 1 },
+ { .name = "IRIG", .value = 0x0010, .frequency = 10000 },
+ { .name = "DCF", .value = 0x0020, .frequency = 77000 },
{ .name = "GEN1", .value = 0x0040 },
{ .name = "GEN2", .value = 0x0080 },
{ .name = "GEN3", .value = 0x0100 },
@@ -890,15 +905,15 @@ static const struct ocp_selector ptp_ocp_sma_out[] = {
};
static const struct ocp_selector ptp_ocp_art_sma_in[] = {
- { .name = "PPS1", .value = 0x0001 },
- { .name = "10Mhz", .value = 0x0008 },
+ { .name = "PPS1", .value = 0x0001, .frequency = 1 },
+ { .name = "10Mhz", .value = 0x0008, .frequency = 1000000 },
{ }
};
static const struct ocp_selector ptp_ocp_art_sma_out[] = {
- { .name = "PHC", .value = 0x0002 },
- { .name = "GNSS", .value = 0x0004 },
- { .name = "10Mhz", .value = 0x0010 },
+ { .name = "PHC", .value = 0x0002, .frequency = 1 },
+ { .name = "GNSS", .value = 0x0004, .frequency = 1 },
+ { .name = "10Mhz", .value = 0x0010, .frequency = 10000000 },
{ }
};
@@ -1351,7 +1366,6 @@ static int
ptp_ocp_init_clock(struct ptp_ocp *bp)
{
struct timespec64 ts;
- bool sync;
u32 ctrl;
ctrl = OCP_CTRL_ENABLE;
@@ -1375,8 +1389,8 @@ ptp_ocp_init_clock(struct ptp_ocp *bp)
ptp_ocp_estimate_pci_timing(bp);
- sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
- if (!sync) {
+ bp->sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
+ if (!bp->sync) {
ktime_get_clocktai_ts64(&ts);
ptp_ocp_settime(&bp->ptp_info, &ts);
}
@@ -2289,22 +2303,35 @@ ptp_ocp_sma_fb_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val)
static void
ptp_ocp_sma_fb_init(struct ptp_ocp *bp)
{
+ struct dpll_pin_properties prop = {
+ .board_label = NULL,
+ .type = DPLL_PIN_TYPE_EXT,
+ .capabilities = DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE,
+ .freq_supported_num = ARRAY_SIZE(ptp_ocp_sma_freq),
+ .freq_supported = ptp_ocp_sma_freq,
+ };
u32 reg;
int i;
/* defaults */
+ for (i = 0; i < OCP_SMA_NUM; i++) {
+ bp->sma[i].default_fcn = i & 1;
+ bp->sma[i].dpll_prop = prop;
+ bp->sma[i].dpll_prop.board_label =
+ bp->ptp_info.pin_config[i].name;
+ }
bp->sma[0].mode = SMA_MODE_IN;
bp->sma[1].mode = SMA_MODE_IN;
bp->sma[2].mode = SMA_MODE_OUT;
bp->sma[3].mode = SMA_MODE_OUT;
- for (i = 0; i < 4; i++)
- bp->sma[i].default_fcn = i & 1;
-
/* If no SMA1 map, the pin functions and directions are fixed. */
if (!bp->sma_map1) {
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < OCP_SMA_NUM; i++) {
bp->sma[i].fixed_fcn = true;
bp->sma[i].fixed_dir = true;
+ bp->sma[i].dpll_prop.capabilities &=
+ ~DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE;
}
return;
}
@@ -2314,7 +2341,7 @@ ptp_ocp_sma_fb_init(struct ptp_ocp *bp)
*/
reg = ioread32(&bp->sma_map2->gpio2);
if (reg == 0xffffffff) {
- for (i = 0; i < 4; i++)
+ for (i = 0; i < OCP_SMA_NUM; i++)
bp->sma[i].fixed_dir = true;
} else {
reg = ioread32(&bp->sma_map1->gpio1);
@@ -2336,7 +2363,7 @@ static const struct ocp_sma_op ocp_fb_sma_op = {
};
static int
-ptp_ocp_fb_set_pins(struct ptp_ocp *bp)
+ptp_ocp_set_pins(struct ptp_ocp *bp)
{
struct ptp_pin_desc *config;
int i;
@@ -2403,16 +2430,16 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
ptp_ocp_tod_init(bp);
ptp_ocp_nmea_out_init(bp);
- ptp_ocp_sma_init(bp);
ptp_ocp_signal_init(bp);
err = ptp_ocp_attr_group_add(bp, fb_timecard_groups);
if (err)
return err;
- err = ptp_ocp_fb_set_pins(bp);
+ err = ptp_ocp_set_pins(bp);
if (err)
return err;
+ ptp_ocp_sma_init(bp);
return ptp_ocp_init_clock(bp);
}
@@ -2452,6 +2479,14 @@ ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
static void
ptp_ocp_art_sma_init(struct ptp_ocp *bp)
{
+ struct dpll_pin_properties prop = {
+ .board_label = NULL,
+ .type = DPLL_PIN_TYPE_EXT,
+ .capabilities = 0,
+ .freq_supported_num = ARRAY_SIZE(ptp_ocp_sma_freq),
+ .freq_supported = ptp_ocp_sma_freq,
+ };
u32 reg;
int i;
@@ -2466,16 +2501,16 @@ ptp_ocp_art_sma_init(struct ptp_ocp *bp)
bp->sma[2].default_fcn = 0x10; /* OUT: 10Mhz */
bp->sma[3].default_fcn = 0x02; /* OUT: PHC */
- /* If no SMA map, the pin functions and directions are fixed. */
- if (!bp->art_sma) {
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < OCP_SMA_NUM; i++) {
+ /* If no SMA map, the pin functions and directions are fixed. */
+ bp->sma[i].dpll_prop = prop;
+ bp->sma[i].dpll_prop.board_label =
+ bp->ptp_info.pin_config[i].name;
+ if (!bp->art_sma) {
bp->sma[i].fixed_fcn = true;
bp->sma[i].fixed_dir = true;
+ continue;
}
- return;
- }
-
- for (i = 0; i < 4; i++) {
reg = ioread32(&bp->art_sma->map[i].gpio);
switch (reg & 0xff) {
@@ -2486,9 +2521,13 @@ ptp_ocp_art_sma_init(struct ptp_ocp *bp)
case 1:
case 8:
bp->sma[i].mode = SMA_MODE_IN;
+ bp->sma[i].dpll_prop.capabilities =
+ DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE;
break;
default:
bp->sma[i].mode = SMA_MODE_OUT;
+ bp->sma[i].dpll_prop.capabilities =
+ DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE;
break;
}
}
@@ -2555,6 +2594,9 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
/* Enable MAC serial port during initialisation */
iowrite32(1, &bp->board_config->mro50_serial_activate);
+ err = ptp_ocp_set_pins(bp);
+ if (err)
+ return err;
ptp_ocp_sma_init(bp);
err = ptp_ocp_attr_group_add(bp, art_timecard_groups);
@@ -2696,16 +2738,9 @@ sma4_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static int
-ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr)
+ptp_ocp_sma_store_val(struct ptp_ocp *bp, int val, enum ptp_ocp_sma_mode mode, int sma_nr)
{
struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1];
- enum ptp_ocp_sma_mode mode;
- int val;
-
- mode = sma->mode;
- val = sma_parse_inputs(bp->sma_op->tbl, buf, &mode);
- if (val < 0)
- return val;
if (sma->fixed_dir && (mode != sma->mode || val & SMA_DISABLE))
return -EOPNOTSUPP;
@@ -2740,6 +2775,20 @@ ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr)
return val;
}
+static int
+ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr)
+{
+ struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1];
+ enum ptp_ocp_sma_mode mode;
+ int val;
+
+ mode = sma->mode;
+ val = sma_parse_inputs(bp->sma_op->tbl, buf, &mode);
+ if (val < 0)
+ return val;
+ return ptp_ocp_sma_store_val(bp, val, mode, sma_nr);
+}
+
static ssize_t
sma1_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -3834,9 +3883,8 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
strcpy(buf, "unknown");
break;
}
- val = ioread32(&bp->reg->status);
seq_printf(s, "%7s: %s, state: %s\n", "PHC src", buf,
- val & OCP_STATUS_IN_SYNC ? "sync" : "unsynced");
+ bp->sync ? "sync" : "unsynced");
if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts)) {
struct timespec64 sys_ts;
@@ -4067,7 +4115,6 @@ ptp_ocp_phc_info(struct ptp_ocp *bp)
{
struct timespec64 ts;
u32 version, select;
- bool sync;
version = ioread32(&bp->reg->version);
select = ioread32(&bp->reg->select);
@@ -4076,11 +4123,10 @@ ptp_ocp_phc_info(struct ptp_ocp *bp)
ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16),
ptp_clock_index(bp->ptp));
- sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n",
ts.tv_sec, ts.tv_nsec,
- sync ? "in-sync" : "UNSYNCED");
+ bp->sync ? "in-sync" : "UNSYNCED");
}
static void
@@ -4177,12 +4223,168 @@ ptp_ocp_detach(struct ptp_ocp *bp)
device_unregister(&bp->dev);
}
+static int ptp_ocp_dpll_lock_status_get(const struct dpll_device *dpll,
+ void *priv,
+ enum dpll_lock_status *status,
+ struct netlink_ext_ack *extack)
+{
+ struct ptp_ocp *bp = priv;
+
+ *status = bp->sync ? DPLL_LOCK_STATUS_LOCKED : DPLL_LOCK_STATUS_UNLOCKED;
+
+ return 0;
+}
+
+static int ptp_ocp_dpll_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ptp_ocp *bp = priv;
+ int idx;
+
+ if (bp->pps_select) {
+ idx = ioread32(&bp->pps_select->gpio1);
+ *state = (&bp->sma[idx] == pin_priv) ? DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_SELECTABLE;
+ return 0;
+ }
+ NL_SET_ERR_MSG(extack, "pin selection is not supported on current HW");
+ return -EINVAL;
+}
+
+static int ptp_ocp_dpll_mode_get(const struct dpll_device *dpll, void *priv,
+ enum dpll_mode *mode, struct netlink_ext_ack *extack)
+{
+ *mode = DPLL_MODE_AUTOMATIC;
+ return 0;
+}
+
+static bool ptp_ocp_dpll_mode_supported(const struct dpll_device *dpll,
+ void *priv, const enum dpll_mode mode,
+ struct netlink_ext_ack *extack)
+{
+ return mode == DPLL_MODE_AUTOMATIC;
+}
+
+static int ptp_ocp_dpll_direction_get(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ptp_ocp_sma_connector *sma = pin_priv;
+
+ *direction = sma->mode == SMA_MODE_IN ?
+ DPLL_PIN_DIRECTION_INPUT :
+ DPLL_PIN_DIRECTION_OUTPUT;
+ return 0;
+}
+
+static int ptp_ocp_dpll_direction_set(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ptp_ocp_sma_connector *sma = pin_priv;
+ struct ptp_ocp *bp = dpll_priv;
+ enum ptp_ocp_sma_mode mode;
+ int sma_nr = (sma - bp->sma);
+
+ if (sma->fixed_dir)
+ return -EOPNOTSUPP;
+ mode = direction == DPLL_PIN_DIRECTION_INPUT ?
+ SMA_MODE_IN : SMA_MODE_OUT;
+ return ptp_ocp_sma_store_val(bp, 0, mode, sma_nr);
+}
+
+static int ptp_ocp_dpll_frequency_set(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct ptp_ocp_sma_connector *sma = pin_priv;
+ struct ptp_ocp *bp = dpll_priv;
+ const struct ocp_selector *tbl;
+ int sma_nr = (sma - bp->sma);
+ int i;
+
+ if (sma->fixed_fcn)
+ return -EOPNOTSUPP;
+
+ tbl = bp->sma_op->tbl[sma->mode];
+ for (i = 0; tbl[i].name; i++)
+ if (tbl[i].frequency == frequency)
+ return ptp_ocp_sma_store_val(bp, i, sma->mode, sma_nr);
+ return -EINVAL;
+}
+
+static int ptp_ocp_dpll_frequency_get(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 *frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct ptp_ocp_sma_connector *sma = pin_priv;
+ struct ptp_ocp *bp = dpll_priv;
+ const struct ocp_selector *tbl;
+ int sma_nr = (sma - bp->sma);
+ u32 val;
+ int i;
+
+ val = bp->sma_op->get(bp, sma_nr);
+ tbl = bp->sma_op->tbl[sma->mode];
+ for (i = 0; tbl[i].name; i++)
+ if (val == tbl[i].value) {
+ *frequency = tbl[i].frequency;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct dpll_device_ops dpll_ops = {
+ .lock_status_get = ptp_ocp_dpll_lock_status_get,
+ .mode_get = ptp_ocp_dpll_mode_get,
+ .mode_supported = ptp_ocp_dpll_mode_supported,
+};
+
+static const struct dpll_pin_ops dpll_pins_ops = {
+ .frequency_get = ptp_ocp_dpll_frequency_get,
+ .frequency_set = ptp_ocp_dpll_frequency_set,
+ .direction_get = ptp_ocp_dpll_direction_get,
+ .direction_set = ptp_ocp_dpll_direction_set,
+ .state_on_dpll_get = ptp_ocp_dpll_state_get,
+};
+
+static void
+ptp_ocp_sync_work(struct work_struct *work)
+{
+ struct ptp_ocp *bp;
+ bool sync;
+
+ bp = container_of(work, struct ptp_ocp, sync_work.work);
+ sync = !!(ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC);
+
+ if (bp->sync != sync)
+ dpll_device_change_ntf(bp->dpll);
+
+ bp->sync = sync;
+
+ queue_delayed_work(system_power_efficient_wq, &bp->sync_work, HZ);
+}
+
static int
ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct devlink *devlink;
struct ptp_ocp *bp;
- int err;
+ int err, i;
+ u64 clkid;
devlink = devlink_alloc(&ptp_ocp_devlink_ops, sizeof(*bp), &pdev->dev);
if (!devlink) {
@@ -4201,6 +4403,8 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto out_disable;
+ INIT_DELAYED_WORK(&bp->sync_work, ptp_ocp_sync_work);
+
/* compat mode.
* Older FPGA firmware only returns 2 irq's.
* allow this - if not all of the IRQ's are returned, skip the
@@ -4232,8 +4436,43 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ptp_ocp_info(bp);
devlink_register(devlink);
- return 0;
+ clkid = pci_get_dsn(pdev);
+ bp->dpll = dpll_device_get(clkid, 0, THIS_MODULE);
+ if (IS_ERR(bp->dpll)) {
+ err = PTR_ERR(bp->dpll);
+ dev_err(&pdev->dev, "dpll_device_get failed\n");
+ goto out;
+ }
+
+ err = dpll_device_register(bp->dpll, DPLL_TYPE_PPS, &dpll_ops, bp);
+ if (err)
+ goto out;
+
+ for (i = 0; i < OCP_SMA_NUM; i++) {
+ bp->sma[i].dpll_pin = dpll_pin_get(clkid, i, THIS_MODULE, &bp->sma[i].dpll_prop);
+ if (IS_ERR(bp->sma[i].dpll_pin)) {
+ err = PTR_ERR(bp->sma[i].dpll_pin);
+ goto out_dpll;
+ }
+
+ err = dpll_pin_register(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops,
+ &bp->sma[i]);
+ if (err) {
+ dpll_pin_put(bp->sma[i].dpll_pin);
+ goto out_dpll;
+ }
+ }
+ queue_delayed_work(system_power_efficient_wq, &bp->sync_work, HZ);
+
+ return 0;
+out_dpll:
+ while (i) {
+ --i;
+ dpll_pin_unregister(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops, &bp->sma[i]);
+ dpll_pin_put(bp->sma[i].dpll_pin);
+ }
+ dpll_device_put(bp->dpll);
out:
ptp_ocp_detach(bp);
out_disable:
@@ -4248,7 +4487,17 @@ ptp_ocp_remove(struct pci_dev *pdev)
{
struct ptp_ocp *bp = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(bp);
+ int i;
+ cancel_delayed_work_sync(&bp->sync_work);
+ for (i = 0; i < OCP_SMA_NUM; i++) {
+ if (bp->sma[i].dpll_pin) {
+ dpll_pin_unregister(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops, &bp->sma[i]);
+ dpll_pin_put(bp->sma[i].dpll_pin);
+ }
+ }
+ dpll_device_unregister(bp->dpll, &dpll_ops, bp);
+ dpll_device_put(bp->dpll);
devlink_unregister(devlink);
ptp_ocp_detach(bp);
pci_disable_device(pdev);
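
The dpll registrations above follow a fixed order: obtain the device handle by clock id, register it with its ops, then obtain and register each pin; teardown mirrors it in reverse, as ptp_ocp_remove() shows. A minimal sketch of the same pattern for a driver with a single pin, reusing the dpll_ops/dpll_pins_ops tables defined above (my_drv and my_dpll_attach are hypothetical names):

#include <linux/dpll.h>

struct my_drv {
	struct dpll_device *dpll;
	struct dpll_pin *pin;
};

static int my_dpll_attach(struct my_drv *drv, u64 clkid,
			  const struct dpll_pin_properties *prop)
{
	int err;

	drv->dpll = dpll_device_get(clkid, 0, THIS_MODULE);
	if (IS_ERR(drv->dpll))
		return PTR_ERR(drv->dpll);

	err = dpll_device_register(drv->dpll, DPLL_TYPE_PPS, &dpll_ops, drv);
	if (err)
		goto err_put_dev;

	drv->pin = dpll_pin_get(clkid, 0, THIS_MODULE, prop);
	if (IS_ERR(drv->pin)) {
		err = PTR_ERR(drv->pin);
		goto err_unreg_dev;
	}

	err = dpll_pin_register(drv->dpll, drv->pin, &dpll_pins_ops, drv);
	if (err)
		goto err_put_pin;
	return 0;

err_put_pin:
	dpll_pin_put(drv->pin);
err_unreg_dev:
	dpll_device_unregister(drv->dpll, &dpll_ops, drv);
err_put_dev:
	dpll_device_put(drv->dpll);
	return err;
}

Note the get/put pairs are reference counts on the shared dpll/pin objects, separate from the register/unregister pairs that attach this driver's ops and priv.
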
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 817d377a3f36..83711aad855c 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -114,6 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct sk_buff *skb;
unsigned out, in;
size_t nbytes;
+ u32 offset;
int head;
skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
@@ -156,7 +157,8 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
}
iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
- payload_len = skb->len;
+ offset = VIRTIO_VSOCK_SKB_CB(skb)->offset;
+ payload_len = skb->len - offset;
hdr = virtio_vsock_hdr(skb);
/* If the packet is greater than the space available in the
@@ -197,8 +199,10 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
break;
}
- nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
- if (nbytes != payload_len) {
+ if (skb_copy_datagram_iter(skb,
+ offset,
+ &iov_iter,
+ payload_len)) {
kfree_skb(skb);
vq_err(vq, "Faulted on copying pkt buf\n");
break;
@@ -212,13 +216,13 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
added = true;
- skb_pull(skb, payload_len);
+ VIRTIO_VSOCK_SKB_CB(skb)->offset += payload_len;
total_len += payload_len;
/* If we didn't send all the payload we can requeue the packet
* to send it with the next available buffer.
*/
- if (skb->len > 0) {
+ if (VIRTIO_VSOCK_SKB_CB(skb)->offset < skb->len) {
hdr->flags |= cpu_to_le32(flags_to_restore);
/* We are queueing the same skb to handle
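
With the cursor kept in the control block, skb->data and skb->len stay untouched across partial transmissions; only VIRTIO_VSOCK_SKB_CB(skb)->offset advances, and skb_copy_datagram_iter() copies from that offset (which, unlike the old copy_to_iter() on skb->data, also handles nonlinear skbs). A reduced sketch of the resume-from-offset pattern, assuming the virtio_vsock_skb_cb definition in include/linux/virtio_vsock.h below; my_send_chunk is a hypothetical helper:

#include <linux/skbuff.h>
#include <linux/virtio_vsock.h>

/* Copy up to 'space' bytes starting at the saved cursor; returns
 * true once the whole payload has been consumed, false on fault
 * (the caller is expected to drop the skb then).
 */
static bool my_send_chunk(struct sk_buff *skb, struct iov_iter *iter,
			  size_t space)
{
	u32 off = VIRTIO_VSOCK_SKB_CB(skb)->offset;
	size_t len = min_t(size_t, skb->len - off, space);

	if (skb_copy_datagram_iter(skb, off, iter, len))
		return false;

	VIRTIO_VSOCK_SKB_CB(skb)->offset += len;
	return VIRTIO_VSOCK_SKB_CB(skb)->offset >= skb->len;
}
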
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index d0807ad43f93..dd71d3009771 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -240,6 +240,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
@@ -295,7 +296,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
+ * PF configures requested queue and returns a status code. Setting
+ * the crc_disable flag to 1 disables CRC stripping for each queue in
+ * the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC offload
+ * must have been negotiated before sending this message, or the PF
+ * will ignore the request. The flag should be set identically for
+ * all queues of a VF.
*/
/* Rx queue config info */
@@ -307,7 +314,7 @@ struct virtchnl_rxq_info {
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
- u8 pad0;
+ u8 crc_disable;
u8 rxdid;
u8 pad1[2];
u64 dma_ring_addr;
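
A VF driver that negotiated VIRTCHNL_VF_OFFLOAD_CRC would then fill the new field when building each queue's config message. A sketch (my_fill_rxq_info and want_crc are hypothetical):

#include <linux/avf/virtchnl.h>

static void my_fill_rxq_info(struct virtchnl_rxq_info *rxq,
			     u32 negotiated_caps, bool want_crc)
{
	/* The PF honours crc_disable only when the capability was
	 * negotiated; set it identically for every queue of the VF.
	 */
	rxq->crc_disable = (negotiated_caps & VIRTCHNL_VF_OFFLOAD_CRC) &&
			   want_crc;
}
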
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 49f8b691496c..a34ac7f00c86 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -55,8 +55,8 @@ struct cgroup;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
-extern struct bpf_mem_alloc bpf_global_ma;
-extern bool bpf_global_ma_set;
+extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
+extern bool bpf_global_ma_set, bpf_global_percpu_ma_set;
typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
@@ -180,14 +180,15 @@ enum btf_field_type {
BPF_TIMER = (1 << 1),
BPF_KPTR_UNREF = (1 << 2),
BPF_KPTR_REF = (1 << 3),
- BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF,
- BPF_LIST_HEAD = (1 << 4),
- BPF_LIST_NODE = (1 << 5),
- BPF_RB_ROOT = (1 << 6),
- BPF_RB_NODE = (1 << 7),
+ BPF_KPTR_PERCPU = (1 << 4),
+ BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
+ BPF_LIST_HEAD = (1 << 5),
+ BPF_LIST_NODE = (1 << 6),
+ BPF_RB_ROOT = (1 << 7),
+ BPF_RB_NODE = (1 << 8),
BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
BPF_RB_NODE | BPF_RB_ROOT,
- BPF_REFCOUNT = (1 << 8),
+ BPF_REFCOUNT = (1 << 9),
};
typedef void (*btf_dtor_kfunc_t)(void *);
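
Since btf_field_type is a bitmask, composite masks such as BPF_KPTR match any of their member bits with a single AND, which is why inserting BPF_KPTR_PERCPU forces the later flags to shift up by one. A sketch of the usual test (field_is_kptr is a hypothetical helper; struct btf_field carries an 'enum btf_field_type type' member):

/* True for any kptr flavour: unreferenced, referenced or percpu. */
static bool field_is_kptr(const struct btf_field *field)
{
	return field->type & BPF_KPTR;
}
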
@@ -300,6 +301,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
return "kptr";
+ case BPF_KPTR_PERCPU:
+ return "percpu_kptr";
case BPF_LIST_HEAD:
return "bpf_list_head";
case BPF_LIST_NODE:
@@ -325,6 +328,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
return sizeof(struct bpf_timer);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
return sizeof(u64);
case BPF_LIST_HEAD:
return sizeof(struct bpf_list_head);
@@ -351,6 +355,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
return __alignof__(struct bpf_timer);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
return __alignof__(u64);
case BPF_LIST_HEAD:
return __alignof__(struct bpf_list_head);
@@ -389,6 +394,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
case BPF_TIMER:
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
break;
default:
WARN_ON_ONCE(1);
@@ -1029,6 +1035,11 @@ struct btf_func_model {
*/
#define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
+/* Indicates that the current trampoline is in a tail-call context. The
+ * trampoline then has to cache and restore tail_call_cnt to avoid an
+ * infinite tail-call loop.
+ */
+#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
+
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86.
*/
@@ -1378,6 +1389,7 @@ struct bpf_prog_aux {
u32 stack_depth;
u32 id;
u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
u32 attach_btf_id; /* in-kernel BTF type id to attach to */
u32 ctx_arg_info_size;
@@ -1398,6 +1410,8 @@ struct bpf_prog_aux {
bool sleepable;
bool tail_call_reachable;
bool xdp_has_frags;
+ bool exception_cb;
+ bool exception_boundary;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */
@@ -1420,6 +1434,7 @@ struct bpf_prog_aux {
int cgroup_atype; /* enum cgroup_bpf_attach_type */
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
char name[BPF_OBJ_NAME_LEN];
+ unsigned int (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp);
#ifdef CONFIG_SECURITY
void *security;
#endif
@@ -2407,9 +2422,11 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *reg);
+ struct bpf_reg_state *reg, bool is_ex_cb);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);
+const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key);
struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);
@@ -3183,4 +3200,9 @@ static inline gfp_t bpf_memcg_flags(gfp_t flags)
return flags;
}
+static inline bool bpf_is_subprog(const struct bpf_prog *prog)
+{
+ return prog->aux->func_idx != 0;
+}
+
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index b6e58dab8e27..94ec766432f5 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -300,6 +300,7 @@ struct bpf_func_state {
bool in_callback_fn;
struct tnum callback_ret_range;
bool in_async_callback_fn;
+ bool in_exception_callback_fn;
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
@@ -480,6 +481,7 @@ struct bpf_insn_aux_data {
bool zext_dst; /* this insn zero extends dst reg */
bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
+ bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
u8 alu_state; /* used in combination with alu_limit */
/* below fields are initialized once */
@@ -540,7 +542,9 @@ struct bpf_subprog_info {
bool has_tail_call;
bool tail_call_reachable;
bool has_ld_abs;
+ bool is_cb;
bool is_async_cb;
+ bool is_exception_cb;
};
struct bpf_verifier_env;
@@ -587,6 +591,8 @@ struct bpf_verifier_env {
u32 used_map_cnt; /* number of used maps */
u32 used_btf_cnt; /* number of used BTF objects */
u32 id_gen; /* used to generate unique reg IDs */
+ u32 hidden_subprog_cnt; /* number of hidden subprogs */
+ int exception_callback_subprog;
bool explore_alu_limits;
bool allow_ptr_leaks;
bool allow_uninit_stack;
@@ -594,10 +600,11 @@ struct bpf_verifier_env {
bool bypass_spec_v1;
bool bypass_spec_v4;
bool seen_direct_write;
+ bool seen_exception;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
- struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+ struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
union {
struct bpf_idmap idmap_scratch;
struct bpf_idset idset_scratch;
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index b658961156a0..7a9a40163c0f 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -19,7 +19,7 @@ struct ceph_monmap {
struct ceph_fsid fsid;
u32 epoch;
u32 num_mon;
- struct ceph_entity_inst mon_inst[];
+ struct ceph_entity_inst mon_inst[] __counted_by(num_mon);
};
struct ceph_mon_client;
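
__counted_by(num_mon) ties the flexible array's element count to the num_mon member, letting FORTIFY_SOURCE and UBSAN bounds-check accesses at run time; the counter must be assigned before the array is indexed. A hedged sketch of the allocation pattern it expects (my_monmap_alloc is hypothetical):

#include <linux/ceph/mon_client.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct ceph_monmap *my_monmap_alloc(u32 num_mon)
{
	struct ceph_monmap *m;

	m = kzalloc(struct_size(m, mon_inst, num_mon), GFP_KERNEL);
	if (!m)
		return NULL;
	/* Set the counter before touching mon_inst[] so that
	 * __counted_by-aware bounds checks see the right size.
	 */
	m->num_mon = num_mon;
	return m;
}
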
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index c523c6683789..6f1ca49306d2 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -2,6 +2,15 @@
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H
+/*
+ * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
+ * In the meantime, to support gcc < 10, we implement __has_builtin
+ * by hand.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
#ifndef __ASSEMBLY__
/*
@@ -134,17 +143,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __preserve_most
#endif
-/* Builtins */
-
-/*
- * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
- * In the meantime, to support gcc < 10, we implement __has_builtin
- * by hand.
- */
-#ifndef __has_builtin
-#define __has_builtin(x) (0)
-#endif
-
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
@@ -352,6 +350,18 @@ struct ftrace_likely_data {
# define __realloc_size(x, ...)
#endif
+/*
+ * When the size of an allocated object is needed, use the best available
+ * mechanism to find it. (For cases where sizeof() cannot be used.)
+ */
+#if __has_builtin(__builtin_dynamic_object_size)
+#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
+#define __member_size(p) __builtin_dynamic_object_size(p, 1)
+#else
+#define __struct_size(p) __builtin_object_size(p, 0)
+#define __member_size(p) __builtin_object_size(p, 1)
+#endif
+
#ifndef asm_volatile_goto
#define asm_volatile_goto(x...) asm goto(x)
#endif
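
Hoisting __struct_size()/__member_size() into compiler_types.h makes them usable outside the fortified string helpers; they degrade from __builtin_dynamic_object_size() to the compile-time-only __builtin_object_size() on older compilers. A sketch of what each reports when the compiler can see the allocation (struct my_buf is hypothetical):

#include <linux/types.h>

struct my_buf {
	u32 len;
	u8 data[];
};

static size_t my_sizes(struct my_buf *b)
{
	/* __struct_size(b) bounds the whole object the pointer refers
	 * to; __member_size(b->data) bounds only the trailing array.
	 * Either may be SIZE_MAX when the compiler cannot tell.
	 */
	return __struct_size(b) + __member_size(b->data);
}
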
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
new file mode 100644
index 000000000000..bbc480cd2932
--- /dev/null
+++ b/include/linux/dpll.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
+ * Copyright (c) 2023 Intel and affiliates
+ */
+
+#ifndef __DPLL_H__
+#define __DPLL_H__
+
+#include <uapi/linux/dpll.h>
+#include <linux/device.h>
+#include <linux/netlink.h>
+
+struct dpll_device;
+struct dpll_pin;
+
+struct dpll_device_ops {
+ int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode, struct netlink_ext_ack *extack);
+ bool (*mode_supported)(const struct dpll_device *dpll, void *dpll_priv,
+ const enum dpll_mode mode,
+ struct netlink_ext_ack *extack);
+ int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ struct netlink_ext_ack *extack);
+ int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv,
+ s32 *temp, struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_ops {
+ int (*frequency_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u64 frequency,
+ struct netlink_ext_ack *extack);
+ int (*frequency_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack);
+ int (*direction_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack);
+ int (*direction_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*prio_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack);
+ int (*prio_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u32 prio, struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_frequency {
+ u64 min;
+ u64 max;
+};
+
+#define DPLL_PIN_FREQUENCY_RANGE(_min, _max) \
+ { \
+ .min = _min, \
+ .max = _max, \
+ }
+
+#define DPLL_PIN_FREQUENCY(_val) DPLL_PIN_FREQUENCY_RANGE(_val, _val)
+#define DPLL_PIN_FREQUENCY_1PPS \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_1_HZ)
+#define DPLL_PIN_FREQUENCY_10MHZ \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_MHZ)
+#define DPLL_PIN_FREQUENCY_IRIG_B \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_KHZ)
+#define DPLL_PIN_FREQUENCY_DCF77 \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_77_5_KHZ)
+
+struct dpll_pin_properties {
+ const char *board_label;
+ const char *panel_label;
+ const char *package_label;
+ enum dpll_pin_type type;
+ unsigned long capabilities;
+ u32 freq_supported_num;
+ struct dpll_pin_frequency *freq_supported;
+};
+
+#if IS_ENABLED(CONFIG_DPLL)
+size_t dpll_msg_pin_handle_size(struct dpll_pin *pin);
+int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin);
+#else
+static inline size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
+{
+ return 0;
+}
+
+static inline int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+{
+ return 0;
+}
+#endif
+
+struct dpll_device *
+dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module);
+
+void dpll_device_put(struct dpll_device *dpll);
+
+int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
+ const struct dpll_device_ops *ops, void *priv);
+
+void dpll_device_unregister(struct dpll_device *dpll,
+ const struct dpll_device_ops *ops, void *priv);
+
+struct dpll_pin *
+dpll_pin_get(u64 clock_id, u32 dev_driver_id, struct module *module,
+ const struct dpll_pin_properties *prop);
+
+int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_put(struct dpll_pin *pin);
+
+int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+int dpll_device_change_ntf(struct dpll_device *dpll);
+
+int dpll_pin_change_ntf(struct dpll_pin *pin);
+
+#endif
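
The DPLL_PIN_FREQUENCY* helpers exist so drivers can declare supported-frequency tables statically and point dpll_pin_properties at them. A sketch for an SMA-style connector pin (names and values hypothetical; enum values per the dpll uapi):

#include <linux/dpll.h>

static struct dpll_pin_frequency my_pin_freqs[] = {
	DPLL_PIN_FREQUENCY_1PPS,
	DPLL_PIN_FREQUENCY_10MHZ,
};

static const struct dpll_pin_properties my_pin_prop = {
	.board_label = "SMA1",
	.type = DPLL_PIN_TYPE_EXT,
	.capabilities = DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE,
	.freq_supported = my_pin_freqs,
	.freq_supported_num = ARRAY_SIZE(my_pin_freqs),
};

A table like this is what the ptp_ocp probe path above passes to dpll_pin_get() as bp->sma[i].dpll_prop.
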
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 761af6b3cf2b..e8822bd595f9 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -117,21 +117,25 @@ struct ctl_table_header;
/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
-#define BPF_ALU64_IMM(OP, DST, IMM) \
+#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU64_IMM(OP, DST, IMM) \
+ BPF_ALU64_IMM_OFF(OP, DST, IMM, 0)
-#define BPF_ALU32_IMM(OP, DST, IMM) \
+#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+ BPF_ALU32_IMM_OFF(OP, DST, IMM, 0)
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
@@ -143,6 +147,16 @@ struct ctl_table_header;
.off = 0, \
.imm = LEN })
+/* Byte Swap, bswap16/32/64 */
+
+#define BPF_BSWAP(DST, LEN) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = LEN })
+
/* Short form of mov, dst_reg = src_reg */
#define BPF_MOV64_REG(DST, SRC) \
@@ -179,6 +193,24 @@ struct ctl_table_header;
.off = 0, \
.imm = IMM })
+/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */
+
+#define BPF_MOVSX64_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+#define BPF_MOVSX32_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST) \
((struct bpf_insn) { \
@@ -263,6 +295,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
+/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */
+
+#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
@@ -912,6 +954,8 @@ bool bpf_jit_needs_zext(void);
bool bpf_jit_supports_subprog_tailcalls(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_jit_supports_far_kfunc_call(void);
+bool bpf_jit_supports_exceptions(void);
+void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
static inline bool bpf_dump_raw_ok(const struct cred *cred)
@@ -981,12 +1025,6 @@ int xdp_do_redirect_frame(struct net_device *dev,
struct bpf_prog *prog);
void xdp_do_flush(void);
-/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
- * it is no longer only flushing maps. Keep this define for compatibility
- * until all drivers are updated - do not use xdp_do_flush_map() in new code!
- */
-#define xdp_do_flush_map xdp_do_flush
-
void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
#ifdef CONFIG_INET
@@ -1127,6 +1165,7 @@ const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
+struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
@@ -1194,6 +1233,11 @@ static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
return -ERANGE;
}
+static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
+{
+ return NULL;
+}
+
static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
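
The *_OFF variants plus BPF_MOVSX*, BPF_BSWAP and BPF_LDX_MEMSX cover the cpuv4 instructions: sign-extending loads and moves, unconditional byte swap, and signed division/modulo encoded via the off field. A sketch assembling a short sequence with them (register choice arbitrary):

#include <linux/filter.h>

static const struct bpf_insn my_insns[] = {
	/* r1 = *(s16 *)(r2 + 0): sign-extending load */
	BPF_LDX_MEMSX(BPF_H, BPF_REG_1, BPF_REG_2, 0),
	/* r1 = (s32)r1: sign-extending move, source width in off */
	BPF_MOVSX64_REG(BPF_REG_1, BPF_REG_1, 32),
	/* r1 = bswap64(r1): unconditional byte swap */
	BPF_BSWAP(BPF_REG_1, 64),
	BPF_EXIT_INSN(),
};
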
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index da51a83b2829..1e7711185ec6 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -93,13 +93,9 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
#if __has_builtin(__builtin_dynamic_object_size)
#define POS __pass_dynamic_object_size(1)
#define POS0 __pass_dynamic_object_size(0)
-#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
-#define __member_size(p) __builtin_dynamic_object_size(p, 1)
#else
#define POS __pass_object_size(1)
#define POS0 __pass_object_size(0)
-#define __struct_size(p) __builtin_object_size(p, 0)
-#define __member_size(p) __builtin_object_size(p, 1)
#endif
#define __compiletime_lessthan(bounds, length) ( \
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index ebf4349a53af..5171231f70a8 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -39,7 +39,7 @@ struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct rcu_head rcu;
- __be32 sl_addr[];
+ __be32 sl_addr[] __counted_by(sl_max);
};
#define IP_SFBLOCK 10 /* allocate this many at once */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index af8a771a053c..5e605e384aac 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -82,6 +82,7 @@ struct ipv6_devconf {
__u32 ioam6_id_wide;
__u8 ioam6_enabled;
__u8 ndisc_evict_nocarrier;
+ __u8 ra_honor_pio_life;
struct ctl_table_header *sysctl_header;
};
@@ -213,28 +214,9 @@ struct ipv6_pinfo {
__be32 flow_label;
__u32 frag_size;
- /*
- * Packed in 16bits.
- * Omit one shift by putting the signed field at MSB.
- */
-#if defined(__BIG_ENDIAN_BITFIELD)
- __s16 hop_limit:9;
- __u16 __unused_1:7;
-#else
- __u16 __unused_1:7;
- __s16 hop_limit:9;
-#endif
+ s16 hop_limit;
+ u8 mcast_hops;
-#if defined(__BIG_ENDIAN_BITFIELD)
- /* Packed in 16bits. */
- __s16 mcast_hops:9;
- __u16 __unused_2:6,
- mc_loop:1;
-#else
- __u16 mc_loop:1,
- __unused_2:6;
- __s16 mcast_hops:9;
-#endif
int ucast_oif;
int mcast_oif;
@@ -262,21 +244,11 @@ struct ipv6_pinfo {
} rxopt;
/* sockopt flags */
- __u16 recverr:1,
- sndflow:1,
- repflow:1,
- pmtudisc:3,
- padding:1, /* 1 bit hole */
- srcprefs:3, /* 001: prefer temporary address
+ __u8 srcprefs; /* 001: prefer temporary address
* 010: prefer public address
* 100: prefer care-of address
*/
- dontfrag:1,
- autoflowlabel:1,
- autoflowlabel_set:1,
- mc_all:1,
- recverr_rfc4884:1,
- rtalert_isolate:1;
+ __u8 pmtudisc;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;
@@ -293,6 +265,18 @@ struct ipv6_pinfo {
struct inet6_cork cork;
};
+/* We currently use available bits from inet_sk(sk)->inet_flags,
+ * this could change in the future.
+ */
+#define inet6_test_bit(nr, sk) \
+ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_set_bit(nr, sk) \
+ set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_clear_bit(nr, sk) \
+ clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
+
/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
struct raw6_sock {
/* inet_sock has to be the first member of raw6_sock */
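
With the ipv6_pinfo bitfields replaced by bits in inet_sk(sk)->inet_flags, flag updates become atomic bit operations, which is what lets helpers such as ip6_sock_set_recverr() in the net/ipv6.h hunk below drop lock_sock(). A sketch of the accessor pattern (my_* helpers hypothetical):

#include <linux/ipv6.h>

static void my_set_dontfrag(struct sock *sk, bool on)
{
	/* Atomic bit op; no socket lock needed for simple flags. */
	inet6_assign_bit(DONTFRAG, sk, on);
}

static bool my_get_dontfrag(const struct sock *sk)
{
	return inet6_test_bit(DONTFRAG, sk);
}
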
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 842623d708c2..9bc5b4d35494 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -285,8 +285,10 @@ static inline bool kasan_check_byte(const void *address)
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif
#ifdef CONFIG_KASAN_GENERIC
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 4d5be378fa8c..8fbe22de16ef 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -366,6 +366,7 @@ enum mlx5_driver_event {
MLX5_DRIVER_EVENT_UPLINK_NETDEV,
MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
+ MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
};
enum {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 3033bbaeac81..92434814c855 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -155,6 +155,8 @@ enum {
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
+ MLX5_REG_MSECQ = 0x9155,
+ MLX5_REG_MSEES = 0x9156,
MLX5_REG_MIRC = 0x9162,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 1e00c2436377..6f7725238abc 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -67,6 +67,7 @@ enum {
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
+ MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
};
#define LEFTOVERS_RULE_NUM 2
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index fc3db401f8a2..b23d8ff286a1 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -10176,7 +10176,9 @@ struct mlx5_ifc_mcam_access_reg_bits2 {
u8 mirc[0x1];
u8 regs_97_to_96[0x2];
- u8 regs_95_to_64[0x20];
+ u8 regs_95_to_87[0x09];
+ u8 synce_registers[0x2];
+ u8 regs_84_to_64[0x15];
u8 regs_63_to_32[0x20];
@@ -10572,6 +10574,7 @@ enum {
MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12,
};
struct mlx5_ifc_initial_seg_bits {
@@ -12549,4 +12552,59 @@ struct mlx5_ifc_modify_page_track_obj_in_bits {
struct mlx5_ifc_page_track_bits obj_context;
};
+struct mlx5_ifc_msecq_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x12];
+ u8 network_option[0x2];
+ u8 local_ssm_code[0x4];
+ u8 local_enhanced_ssm_code[0x8];
+
+ u8 local_clock_identity[0x40];
+
+ u8 reserved_at_80[0x180];
+};
+
+enum {
+ MLX5_MSEES_FIELD_SELECT_ENABLE = BIT(0),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS = BIT(1),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE = BIT(2),
+};
+
+enum mlx5_msees_admin_status {
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_ADMIN_STATUS_TRACK = 0x1,
+};
+
+enum mlx5_msees_oper_status {
+ MLX5_MSEES_OPER_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_OPER_STATUS_SELF_TRACK = 0x1,
+ MLX5_MSEES_OPER_STATUS_OTHER_TRACK = 0x2,
+ MLX5_MSEES_OPER_STATUS_HOLDOVER = 0x3,
+ MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER = 0x4,
+ MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING = 0x5,
+};
+
+struct mlx5_ifc_msees_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 lp_msb[0x2];
+ u8 reserved_at_14[0xc];
+
+ u8 field_select[0x20];
+
+ u8 admin_status[0x4];
+ u8 oper_status[0x4];
+ u8 ho_acq[0x1];
+ u8 reserved_at_49[0xc];
+ u8 admin_freq_measure[0x1];
+ u8 oper_freq_measure[0x1];
+ u8 failure_reason[0x9];
+
+ u8 frequency_diff[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0896aaa91dd7..e070a4540fba 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -79,6 +79,8 @@ struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
+/* DPLL specific */
+struct dpll_pin;
typedef u32 xdp_features_t;
@@ -917,6 +919,7 @@ struct net_device_path {
u8 queue;
u16 wcid;
u8 bss;
+ u8 amsdu;
} mtk_wdma;
};
};
@@ -2049,6 +2052,9 @@ enum netdev_ml_priv_type {
* SET_NETDEV_DEVLINK_PORT macro. This pointer is static
* during the time netdevice is registered.
*
+ * @dpll_pin: Pointer to the SyncE source pin of the DPLL subsystem
+ * on which the clock is recovered.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -2405,6 +2411,10 @@ struct net_device {
struct rtnl_hw_stats64 *offload_xstats_l3;
struct devlink_port *devlink_port;
+
+#if IS_ENABLED(CONFIG_DPLL)
+ struct dpll_pin *dpll_pin;
+#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -3940,6 +3950,18 @@ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
+void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
+void netdev_dpll_pin_clear(struct net_device *dev);
+
+static inline struct dpll_pin *netdev_dpll_pin(const struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_DPLL)
+ return dev->dpll_pin;
+#else
+ return NULL;
+#endif
+}
+
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -5214,5 +5236,6 @@ extern struct net_device *blackhole_netdev;
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_ADD(DEV, FIELD, VAL) \
atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
+#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
#endif /* _LINUX_NETDEVICE_H */
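
DEV_STATS_READ() completes the DEV_STATS_INC()/DEV_STATS_ADD() family, so code accounting into the atomic_long_t side of net_device->stats can also read the counters without open-coding atomic_long_read(). A sketch (my_fill_stats is hypothetical):

#include <linux/netdevice.h>

static void my_fill_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *s)
{
	/* Counters bumped elsewhere with DEV_STATS_INC()/_ADD(). */
	s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
	s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
}
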
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index f9b60313eaea..7b5cf4a5cd19 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -309,4 +309,39 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
#define struct_size_t(type, member, count) \
struct_size((type *)NULL, member, count)
+/**
+ * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Allows the caller macro to pass a (different) initializer.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ * @initializer: initializer expression (may be empty for no initialization).
+ */
+#define _DEFINE_FLEX(type, name, member, count, initializer) \
+ _Static_assert(__builtin_constant_p(count), \
+ "onstack flex array members require compile-time const count"); \
+ union { \
+ u8 bytes[struct_size_t(type, member, count)]; \
+ type obj; \
+ } name##_u initializer; \
+ type *name = (type *)&name##_u
+
+/**
+ * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ *
+ * Define a zeroed, on-stack instance of a @type structure with a trailing
+ * flexible array member.
+ * Use __struct_size(@name) to get its compile-time size afterwards.
+ */
+#define DEFINE_FLEX(type, name, member, count) \
+ _DEFINE_FLEX(type, name, member, count, = {})
+
#endif /* __LINUX_OVERFLOW_H */
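
DEFINE_FLEX() gives a zeroed stack object whose trailing flexible array has a known, compile-time-constant bound, so fortified accessors and the __struct_size() helper added to compiler_types.h above can reason about it. A sketch with a hypothetical command struct:

#include <linux/overflow.h>
#include <linux/types.h>

struct my_cmd {
	u32 opcode;
	u32 num_words;
	u32 words[];
};

static void my_build_cmd(void)
{
	/* Zeroed on-stack instance with room for 4 trailing words. */
	DEFINE_FLEX(struct my_cmd, cmd, words, 4);

	cmd->opcode = 1;
	cmd->num_words = 4;
	cmd->words[0] = 0xdeadbeef;
	/* __struct_size(cmd) here equals
	 * struct_size_t(struct my_cmd, words, 4).
	 */
}
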
diff --git a/include/linux/pds/pds_core_if.h b/include/linux/pds/pds_core_if.h
index e838a2b90440..17a87c1a55d7 100644
--- a/include/linux/pds/pds_core_if.h
+++ b/include/linux/pds/pds_core_if.h
@@ -79,6 +79,7 @@ enum pds_core_status_code {
PDS_RC_EVFID = 31, /* VF ID does not exist */
PDS_RC_BAD_FW = 32, /* FW file is invalid or corrupted */
PDS_RC_ECLIENT = 33, /* No such client id */
+ PDS_RC_BAD_PCI = 255, /* Broken PCI when reading status */
};
/**
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 1351b802ffcf..3cc52826f18e 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1736,6 +1736,7 @@ void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_config_aneg(struct phy_device *phydev);
+int _phy_start_aneg(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
int phy_speed_down(struct phy_device *phydev, bool sync);
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index b2b28180dff7..a476648858a6 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -10,6 +10,7 @@
#define MTK_WED_TX_QUEUES 2
#define MTK_WED_RX_QUEUES 2
+#define MTK_WED_RX_PAGE_QUEUES 3
#define WED_WO_STA_REC 0x6
@@ -45,7 +46,7 @@ enum mtk_wed_wo_cmd {
MTK_WED_WO_CMD_WED_END
};
-struct mtk_rxbm_desc {
+struct mtk_wed_bm_desc {
__le32 buf0;
__le32 token;
} __packed __aligned(4);
@@ -76,6 +77,11 @@ struct mtk_wed_wo_rx_stats {
__le32 rx_drop_cnt;
};
+struct mtk_wed_buf {
+ void *p;
+ dma_addr_t phy_addr;
+};
+
struct mtk_wed_device {
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
const struct mtk_wed_ops *ops;
@@ -94,17 +100,20 @@ struct mtk_wed_device {
struct mtk_wed_ring txfree_ring;
struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
+ struct mtk_wed_ring ind_cmd_ring;
struct {
int size;
- void **pages;
+ struct mtk_wed_buf *pages;
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
} tx_buf_ring;
struct {
int size;
- struct mtk_rxbm_desc *desc;
+ struct mtk_wed_bm_desc *desc;
dma_addr_t desc_phys;
} rx_buf_ring;
@@ -114,6 +123,13 @@ struct mtk_wed_device {
dma_addr_t fdbk_phys;
} rro;
+ struct {
+ int size;
+ struct mtk_wed_buf *pages;
+ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ } hw_rro;
+
/* filled by driver: */
struct {
union {
@@ -123,6 +139,7 @@ struct mtk_wed_device {
enum mtk_wed_bus_tye bus_type;
void __iomem *base;
u32 phy_base;
+ u32 id;
u32 wpdma_phys;
u32 wpdma_int;
@@ -131,18 +148,35 @@ struct mtk_wed_device {
u32 wpdma_txfree;
u32 wpdma_rx_glo;
u32 wpdma_rx;
+ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
+ u32 wpdma_rx_pg;
bool wcid_512;
+ bool hw_rro;
+ bool msi;
u16 token_start;
unsigned int nbuf;
unsigned int rx_nbuf;
unsigned int rx_npkt;
unsigned int rx_size;
+ unsigned int amsdu_max_len;
u8 tx_tbit[MTK_WED_TX_QUEUES];
u8 rx_tbit[MTK_WED_RX_QUEUES];
+ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
+ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
u8 txfree_tbit;
+ u8 amsdu_max_subframes;
+
+ struct {
+ u8 se_group_nums;
+ u16 win_size;
+ u16 particular_sid;
+ u32 ack_sn_addr;
+ dma_addr_t particular_se_phys;
+ dma_addr_t addr_elem_phys[1024];
+ } ind_cmd;
u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
int (*offload_enable)(struct mtk_wed_device *wed);
@@ -182,6 +216,14 @@ struct mtk_wed_ops {
void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
enum tc_setup_type type, void *type_data);
+ void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
+ bool reset);
+ void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
+ void __iomem *regs);
};
extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
@@ -206,16 +248,27 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
return ret;
}
-static inline bool
-mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (dev->version == 3)
+ return dev->wlan.hw_rro;
+
return dev->version != 1;
#else
return false;
#endif
}
+static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ return dev->version == 3;
+#else
+ return false;
+#endif
+}
+
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
#define mtk_wed_device_active(_dev) !!(_dev)->ops
#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
@@ -242,6 +295,15 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
#define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
(_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
+ (_dev)->ops->start_hw_rro(_dev, _mask, _reset)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
+ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
+
#else
static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
{
@@ -261,6 +323,10 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
#define mtk_wed_device_stop(_dev) do {} while (0)
#define mtk_wed_device_dma_reset(_dev) do {} while (0)
#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
#endif
#endif
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index ce89cc3e4913..c0079a7574ae 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -139,6 +139,7 @@ struct stmmac_rxq_cfg {
struct stmmac_txq_cfg {
u32 weight;
+ bool coe_unsupported;
u8 mode_to_use;
/* Credit Base Shaper parameters */
u32 send_slope;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3c5efeeb024f..e15452df9804 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -377,6 +377,14 @@ struct tcp_sock {
* Total data bytes retransmitted
*/
u32 total_retrans; /* Total retransmits for entire connection */
+ u32 rto_stamp; /* Start time (ms) of last CA_Loss recovery */
+ u16 total_rto; /* Total number of RTO timeouts, including
+ * SYN/SYN-ACK and recurring timeouts.
+ */
+ u16 total_rto_recoveries; /* Total number of RTO recoveries,
+ * including any unfinished recovery.
+ */
+ u32 total_rto_time; /* ms spent in (completed) RTO recoveries. */
u32 urg_seq; /* Seq of received urgent pointer */
unsigned int keepalive_time; /* time before keep alive takes place */
@@ -463,15 +471,17 @@ enum tsq_enum {
TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call
* tcp_v{4|6}_mtu_reduced()
*/
+ TCP_ACK_DEFERRED, /* TX pure ack is deferred */
};
enum tsq_flags {
- TSQF_THROTTLED = (1UL << TSQ_THROTTLED),
- TSQF_QUEUED = (1UL << TSQ_QUEUED),
- TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED),
- TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED),
- TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED),
- TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
+ TSQF_THROTTLED = BIT(TSQ_THROTTLED),
+ TSQF_QUEUED = BIT(TSQ_QUEUED),
+ TCPF_TSQ_DEFERRED = BIT(TCP_TSQ_DEFERRED),
+ TCPF_WRITE_TIMER_DEFERRED = BIT(TCP_WRITE_TIMER_DEFERRED),
+ TCPF_DELACK_TIMER_DEFERRED = BIT(TCP_DELACK_TIMER_DEFERRED),
+ TCPF_MTU_REDUCED_DEFERRED = BIT(TCP_MTU_REDUCED_DEFERRED),
+ TCPF_ACK_DEFERRED = BIT(TCP_ACK_DEFERRED),
};
#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 43c1fb2d2c21..d04188714dca 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -32,25 +32,30 @@ static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
return (num + net_hash_mix(net)) & mask;
}
+enum {
+ UDP_FLAGS_CORK, /* Cork is required */
+ UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */
+ UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */
+ UDP_FLAGS_GRO_ENABLED, /* Request GRO aggregation */
+ UDP_FLAGS_ACCEPT_FRAGLIST,
+ UDP_FLAGS_ACCEPT_L4,
+ UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
+ UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */
+ UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
+};
+
struct udp_sock {
/* inet_sock has to be the first member */
struct inet_sock inet;
#define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
#define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
#define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
+
+ unsigned long udp_flags;
+
int pending; /* Any pending frames ? */
- unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
- unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
- encap_enabled:1, /* This socket enabled encap
- * processing; UDP tunnels and
- * different encapsulation layer set
- * this
- */
- gro_enabled:1, /* Request GRO aggregation */
- accept_udp_l4:1,
- accept_udp_fraglist:1;
+
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -62,12 +67,6 @@ struct udp_sock {
*/
__u16 pcslen;
__u16 pcrlen;
-/* indicator bits used by pcflag: */
-#define UDPLITE_BIT 0x1 /* set by udplite proto init function */
-#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */
-#define UDPLITE_RECV_CC 0x4 /* set via udplite setsocktopt */
- __u8 pcflag; /* marks socket as UDP-Lite if > 0 */
- __u8 unused[3];
/*
* For encapsulation sockets.
*/
@@ -95,28 +94,39 @@ struct udp_sock {
int forward_threshold;
};
+#define udp_test_bit(nr, sk) \
+ test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_set_bit(nr, sk) \
+ set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_test_and_set_bit(nr, sk) \
+ test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_clear_bit(nr, sk) \
+ clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_assign_bit(nr, sk, val) \
+ assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
+
#define UDP_MAX_SEGMENTS (1 << 6UL)
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
- udp_sk(sk)->no_check6_tx = val;
+ udp_assign_bit(NO_CHECK6_TX, sk, val);
}
static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
{
- udp_sk(sk)->no_check6_rx = val;
+ udp_assign_bit(NO_CHECK6_RX, sk, val);
}
-static inline bool udp_get_no_check6_tx(struct sock *sk)
+static inline bool udp_get_no_check6_tx(const struct sock *sk)
{
- return udp_sk(sk)->no_check6_tx;
+ return udp_test_bit(NO_CHECK6_TX, sk);
}
-static inline bool udp_get_no_check6_rx(struct sock *sk)
+static inline bool udp_get_no_check6_rx(const struct sock *sk)
{
- return udp_sk(sk)->no_check6_rx;
+ return udp_test_bit(NO_CHECK6_RX, sk);
}
static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
@@ -135,10 +145,12 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
if (!skb_is_gso(skb))
return false;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
+ !udp_test_bit(ACCEPT_L4, sk))
return true;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
+ !udp_test_bit(ACCEPT_FRAGLIST, sk))
return true;
return false;
@@ -146,8 +158,8 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
static inline void udp_allow_gso(struct sock *sk)
{
- udp_sk(sk)->accept_udp_l4 = 1;
- udp_sk(sk)->accept_udp_fraglist = 1;
+ udp_set_bit(ACCEPT_L4, sk);
+ udp_set_bit(ACCEPT_FRAGLIST, sk);
}
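
The UDP conversion mirrors the IPv6 one above: the old lock-protected bitfields become bits in a single unsigned long, read and written with atomic bit ops. Sketch of the before/after for one flag (my_* helpers hypothetical):

#include <linux/udp.h>

static void my_enable_gro(struct sock *sk)
{
	/* Was: udp_sk(sk)->gro_enabled = 1 under the socket lock. */
	udp_set_bit(GRO_ENABLED, sk);
}

static bool my_gro_enabled(const struct sock *sk)
{
	return udp_test_bit(GRO_ENABLED, sk);
}
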
#define udp_portaddr_for_each_entry(__sk, list) \
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index c58453699ee9..ebb3ce63d64d 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -12,6 +12,7 @@
struct virtio_vsock_skb_cb {
bool reply;
bool tap_delivered;
+ u32 offset;
};
#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
@@ -159,6 +160,15 @@ struct virtio_transport {
/* Takes ownership of the packet */
int (*send_pkt)(struct sk_buff *skb);
+
+ /* Used in MSG_ZEROCOPY mode. Checks whether the provided data
+ * (number of buffers) can be transmitted with zerocopy mode. If
+ * a transport does not implement this callback, it needs no
+ * extra checks and can perform zerocopy transmission by
+ * default.
+ */
+ bool (*can_msgzerocopy)(int bufs_num);
};
ssize_t
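
A transport with a hard cap on descriptors per packet would implement the new callback roughly as below; returning true allows zerocopy for that packet (MY_MAX_BUFS and my_can_msgzerocopy are hypothetical):

#define MY_MAX_BUFS 64	/* hypothetical per-packet descriptor limit */

static bool my_can_msgzerocopy(int bufs_num)
{
	/* Reserve one buffer for the packet header. */
	return bufs_num + 1 <= MY_MAX_BUFS;
}

/* Wired up in the transport description:
 *	.can_msgzerocopy = my_can_msgzerocopy,
 */
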
diff --git a/include/net/Space.h b/include/net/Space.h
index c29f3d51c078..ef42629f4258 100644
--- a/include/net/Space.h
+++ b/include/net/Space.h
@@ -10,4 +10,3 @@ struct net_device *smc_init(int unit);
struct net_device *cs89x0_probe(int unit);
struct net_device *tc515_probe(int unit);
struct net_device *lance_probe(int unit);
-struct net_device *cops_probe(int unit);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index aa90adc3b2a4..7ffa8c192c3f 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -541,7 +541,7 @@ static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk,
return ERR_PTR(-EFAULT);
}
- skb->priority = sk->sk_priority;
+ skb->priority = READ_ONCE(sk->sk_priority);
return skb;
}
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 29fd1b4ee654..fad8e36e3d98 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -150,6 +150,7 @@ struct devlink_port {
struct devlink_rate *devlink_rate;
struct devlink_linecard *linecard;
+ u32 rel_index;
};
struct devlink_port_new_attrs {
@@ -1697,6 +1698,8 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 contro
void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port,
u32 controller, u16 pf, u32 sf,
bool external);
+int devl_port_fn_devlink_set(struct devlink_port *devlink_port,
+ struct devlink *fn_devlink);
struct devlink_rate *
devl_rate_node_create(struct devlink *devlink, void *priv, char *node_name,
struct devlink_rate *parent);
@@ -1717,8 +1720,8 @@ void devlink_linecard_provision_clear(struct devlink_linecard *linecard);
void devlink_linecard_provision_fail(struct devlink_linecard *linecard);
void devlink_linecard_activate(struct devlink_linecard *linecard);
void devlink_linecard_deactivate(struct devlink_linecard *linecard);
-void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
- struct devlink *nested_devlink);
+int devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
+ struct devlink *nested_devlink);
int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
@@ -1918,6 +1921,8 @@ devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
void
devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter);
+int devl_nested_devlink_set(struct devlink *devlink,
+ struct devlink *nested_devlink);
bool devlink_is_reload_failed(const struct devlink *devlink);
void devlink_remote_reload_actions_performed(struct devlink *devlink,
enum devlink_reload_limit limit,
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 0b9c6aa27047..d98439ea6146 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -969,6 +969,16 @@ struct dsa_switch_ops {
struct phy_device *phy);
void (*port_disable)(struct dsa_switch *ds, int port);
+
+ /*
+ * Notification for MAC address changes on user ports. Drivers can
+ * currently only veto operations. They should not use this method to
+ * program the hardware, since the operation is not rolled back in case
+ * of other errors.
+ */
+ int (*port_set_mac_address)(struct dsa_switch *ds, int port,
+ const unsigned char *addr);
+
/*
* Compatibility between device trees defining multiple CPU ports and
* drivers which are not OK to use by default the numerically smallest
@@ -1198,7 +1208,8 @@ struct dsa_switch_ops {
* HSR integration
*/
int (*port_hsr_join)(struct dsa_switch *ds, int port,
- struct net_device *hsr);
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack);
int (*port_hsr_leave)(struct dsa_switch *ds, int port,
struct net_device *hsr);
diff --git a/include/net/dst.h b/include/net/dst.h
index 78884429deed..f8b8599a0600 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -392,10 +392,10 @@ static inline int dst_discard(struct sk_buff *skb)
{
return dst_discard_out(&init_net, skb->sk, skb);
}
-void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
- struct net_device *dev, int initial_ref, int initial_obsolete,
+ struct net_device *dev, int initial_obsolete,
unsigned short flags);
struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index c8490729b4ae..3e454c4d7ba6 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -89,7 +89,7 @@ struct ip6_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct rcu_head rcu;
- struct in6_addr sl_addr[];
+ struct in6_addr sl_addr[] __counted_by(sl_max);
};
#define IP6_SFBLOCK 10 /* allocate this many at once */
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 2de0e4d4a027..98e11958cdff 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -268,6 +268,16 @@ enum {
INET_FLAGS_NODEFRAG = 17,
INET_FLAGS_BIND_ADDRESS_NO_PORT = 18,
INET_FLAGS_DEFER_CONNECT = 19,
+ INET_FLAGS_MC6_LOOP = 20,
+ INET_FLAGS_RECVERR6_RFC4884 = 21,
+ INET_FLAGS_MC6_ALL = 22,
+ INET_FLAGS_AUTOFLOWLABEL_SET = 23,
+ INET_FLAGS_AUTOFLOWLABEL = 24,
+ INET_FLAGS_DONTFRAG = 25,
+ INET_FLAGS_RECVERR6 = 26,
+ INET_FLAGS_REPFLOW = 27,
+ INET_FLAGS_RTALERT_ISOLATE = 28,
+ INET_FLAGS_SNDFLOW = 29,
};
/* cmsg flags for inet */
diff --git a/include/net/ip.h b/include/net/ip.h
index 3489a1cca5e7..6fbc0dcf4b97 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -258,7 +258,7 @@ static inline u8 ip_sendmsg_scope(const struct inet_sock *inet,
static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
- return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
+ return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(READ_ONCE(inet->tos));
}
/* datagram.c */
@@ -434,19 +434,22 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
- return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
- inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
+
+ return pmtudisc != IP_PMTUDISC_INTERFACE &&
+ pmtudisc != IP_PMTUDISC_OMIT;
}
static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
- return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
+ return READ_ONCE(inet_sk(sk)->pmtudisc) < IP_PMTUDISC_PROBE;
}
static inline bool ip_sk_ignore_df(const struct sock *sk)
{
- return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
- inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
+
+ return pmtudisc < IP_PMTUDISC_DO || pmtudisc == IP_PMTUDISC_OMIT;
}
static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
@@ -807,6 +810,5 @@ int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);
-void __ip_sock_set_tos(struct sock *sk, int val);
#endif /* _IP_H */
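
All of the pmtudisc rewrites above follow the same lockless-reader rule: load the field once with READ_ONCE() into a local and test only the local, so a concurrent writer cannot make two comparisons disagree. The pattern in isolation (my_pmtu_probing is hypothetical):

#include <net/ip.h>

static bool my_pmtu_probing(const struct sock *sk)
{
	/* One annotated load; both tests see the same snapshot. */
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_PROBE ||
	       pmtudisc == IP_PMTUDISC_OMIT;
}
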
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index b32539bb0fb0..28b065790261 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -53,13 +53,12 @@ struct route_info {
*/
static inline int rt6_srcprefs2flags(unsigned int srcprefs)
{
- /* No need to bitmask because srcprefs have only 3 bits. */
- return srcprefs << 3;
+ return (srcprefs & IPV6_PREFER_SRC_MASK) << 3;
}
static inline unsigned int rt6_flags2srcprefs(int flags)
{
- return (flags >> 3) & 7;
+ return (flags >> 3) & IPV6_PREFER_SRC_MASK;
}
static inline bool rt6_need_strict(const struct in6_addr *daddr)
@@ -266,7 +265,7 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
const struct dst_entry *dst = skb_dst(skb);
unsigned int mtu;
- if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
+ if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) {
mtu = READ_ONCE(dst->dev->mtu);
mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
} else {
@@ -277,14 +276,18 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
{
- return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE &&
- inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc);
+
+ return pmtudisc != IPV6_PMTUDISC_INTERFACE &&
+ pmtudisc != IPV6_PMTUDISC_OMIT;
}
static inline bool ip6_sk_ignore_df(const struct sock *sk)
{
- return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
- inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc);
+
+ return pmtudisc < IPV6_PMTUDISC_DO ||
+ pmtudisc == IPV6_PMTUDISC_OMIT;
}
static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 15de07d36540..d4667b7797e3 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -157,7 +157,7 @@ struct fib_info {
bool pfsrc_removed;
struct nexthop *nh;
struct rcu_head rcu;
- struct fib_nh fib_nh[];
+ struct fib_nh fib_nh[] __counted_by(fib_nhs);
};
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index c6932d1a3fa8..b3444c8a6f74 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -373,12 +373,12 @@ static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
}
static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
- const struct ipv6_pinfo *np)
+ const struct sock *sk)
{
*ipc6 = (struct ipcm6_cookie) {
.hlimit = -1,
- .tclass = np->tclass,
- .dontfrag = np->dontfrag,
+ .tclass = inet6_sk(sk)->tclass,
+ .dontfrag = inet6_test_bit(DONTFRAG, sk),
};
}
@@ -428,7 +428,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
-bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
+bool ip6_autoflowlabel(struct net *net, const struct sock *sk);
static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
@@ -914,9 +914,9 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
int hlimit;
if (ipv6_addr_is_multicast(&fl6->daddr))
- hlimit = np->mcast_hops;
+ hlimit = READ_ONCE(np->mcast_hops);
else
- hlimit = np->hop_limit;
+ hlimit = READ_ONCE(np->hop_limit);
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
return hlimit;
@@ -1303,15 +1303,16 @@ static inline int ip6_sock_set_v6only(struct sock *sk)
static inline void ip6_sock_set_recverr(struct sock *sk)
{
- lock_sock(sk);
- inet6_sk(sk)->recverr = true;
- release_sock(sk);
+ inet6_set_bit(RECVERR6, sk);
}
-static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
+#define IPV6_PREFER_SRC_MASK (IPV6_PREFER_SRC_TMP | IPV6_PREFER_SRC_PUBLIC | \
+ IPV6_PREFER_SRC_COA)
+
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
{
+ unsigned int prefmask = ~IPV6_PREFER_SRC_MASK;
unsigned int pref = 0;
- unsigned int prefmask = ~0;
/* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
switch (val & (IPV6_PREFER_SRC_PUBLIC |
@@ -1361,20 +1362,11 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
return -EINVAL;
}
- inet6_sk(sk)->srcprefs = (inet6_sk(sk)->srcprefs & prefmask) | pref;
+ WRITE_ONCE(inet6_sk(sk)->srcprefs,
+ (READ_ONCE(inet6_sk(sk)->srcprefs) & prefmask) | pref);
return 0;
}
-static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
-{
- int ret;
-
- lock_sock(sk);
- ret = __ip6_sock_set_addr_preferences(sk, val);
- release_sock(sk);
- return ret;
-}
-
static inline void ip6_sock_set_recvpktinfo(struct sock *sk)
{
lock_sock(sk);
diff --git a/include/net/mana/hw_channel.h b/include/net/mana/hw_channel.h
index 3d3b5c881bc1..158b125692c2 100644
--- a/include/net/mana/hw_channel.h
+++ b/include/net/mana/hw_channel.h
@@ -121,7 +121,7 @@ struct hwc_dma_buf {
u32 gpa_mkey;
u32 num_reqs;
- struct hwc_work_request reqs[];
+ struct hwc_work_request reqs[] __counted_by(num_reqs);
};
typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id,
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 4d43adf18606..6e3e9c1363db 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -339,7 +339,7 @@ struct mana_rxq {
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
*/
- struct mana_recv_buf_oob rx_oobs[];
+ struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};
struct mana_tx_qp {
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 7a41c4791536..d96d05b08819 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -132,6 +132,7 @@ struct netns_ipv4 {
u8 sysctl_tcp_syncookies;
u8 sysctl_tcp_migrate_req;
u8 sysctl_tcp_comp_sack_nr;
+ u8 sysctl_tcp_backlog_ack_defer;
int sysctl_tcp_reordering;
u8 sysctl_tcp_retries1;
u8 sysctl_tcp_retries2;
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 15960564e0c3..9fa1d0794dfa 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -20,10 +20,10 @@ struct qdisc_walker {
int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};
-static inline void *qdisc_priv(struct Qdisc *q)
-{
- return &q->privdata;
-}
+#define qdisc_priv(q) \
+ _Generic(q, \
+ const struct Qdisc * : (const void *)&q->privdata, \
+ struct Qdisc * : (void *)&q->privdata)
static inline struct Qdisc *qdisc_from_priv(void *priv)
{
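
The qdisc_priv() rewrite above uses C11 _Generic so the accessor is const-preserving: a const qdisc yields a const private area, while the old inline function silently dropped the qualifier. A minimal standalone sketch of the same trick:

#include <stdio.h>

struct qdisc {
	int handle;
	long privdata[4];	/* private area, as in struct Qdisc */
};

/* Dispatch on the pointer's qualification: const in, const out. */
#define qdisc_priv(q)						\
	_Generic(q,						\
		const struct qdisc * : (const void *)&(q)->privdata, \
		struct qdisc *       : (void *)&(q)->privdata)

int main(void)
{
	struct qdisc q = { .handle = 1 };
	const struct qdisc *cq = &q;

	long *rw = qdisc_priv(&q);	/* mutable view of the private area */
	const long *ro = qdisc_priv(cq);	/* read-only; writing *ro is an error */

	rw[0] = 7;
	printf("%ld\n", ro[0]);	/* prints 7 */
	return 0;
}
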
diff --git a/include/net/route.h b/include/net/route.h
index 51a45b1887b5..5c248a8e3d0e 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -37,7 +37,7 @@
#define RTO_ONLINK 0x01
-#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+#define RT_CONN_FLAGS(sk) (RT_TOS(READ_ONCE(inet_sk(sk)->tos)) | sock_flag(sk, SOCK_LOCALROUTE))
#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
static inline __u8 ip_sock_rt_scope(const struct sock *sk)
@@ -50,7 +50,7 @@ static inline __u8 ip_sock_rt_scope(const struct sock *sk)
static inline __u8 ip_sock_rt_tos(const struct sock *sk)
{
- return RT_TOS(inet_sk(sk)->tos);
+ return RT_TOS(READ_ONCE(inet_sk(sk)->tos));
}
struct ip_tunnel_info;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f232512505f8..c7318c73cfd6 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -587,6 +587,7 @@ static inline void sch_tree_unlock(struct Qdisc *q)
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
+extern const u8 sch_default_prio2band[TC_PRIO_MAX + 1];
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
diff --git a/include/net/sock.h b/include/net/sock.h
index b770261fbdaf..01f0005cb7d8 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1823,12 +1823,11 @@ static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
static inline void sock_release_ownership(struct sock *sk)
{
- if (sock_owned_by_user_nocheck(sk)) {
- sk->sk_lock.owned = 0;
+ DEBUG_NET_WARN_ON_ONCE(!sock_owned_by_user_nocheck(sk));
+ sk->sk_lock.owned = 0;
- /* The sk_lock has mutex_unlock() semantics: */
- mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
- }
+ /* The sk_lock has mutex_unlock() semantics: */
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
}
/* no reclassification while locks are held */
@@ -2008,21 +2007,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
/* sk_tx_queue_mapping accepts only up to a 16-bit value */
if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
return;
- sk->sk_tx_queue_mapping = tx_queue;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
+ * other WRITE_ONCE() because the socket lock might not be held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
}
#define NO_QUEUE_MAPPING USHRT_MAX
static inline void sk_tx_queue_clear(struct sock *sk)
{
- sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
+ * other WRITE_ONCE() because the socket lock might not be held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
}
static inline int sk_tx_queue_get(const struct sock *sk)
{
- if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_tx_queue_mapping;
+ if (sk) {
+ /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
+ * and sk_tx_queue_set().
+ */
+ int val = READ_ONCE(sk->sk_tx_queue_mapping);
+ if (val != NO_QUEUE_MAPPING)
+ return val;
+ }
return -1;
}
@@ -2142,14 +2153,14 @@ static inline bool sk_rethink_txhash(struct sock *sk)
}
static inline struct dst_entry *
-__sk_dst_get(struct sock *sk)
+__sk_dst_get(const struct sock *sk)
{
return rcu_dereference_check(sk->sk_dst_cache,
lockdep_sock_is_held(sk));
}
static inline struct dst_entry *
-sk_dst_get(struct sock *sk)
+sk_dst_get(const struct sock *sk)
{
struct dst_entry *dst;
@@ -2171,7 +2182,7 @@ static inline void __dst_negative_advice(struct sock *sk)
if (ndst != dst) {
rcu_assign_pointer(sk->sk_dst_cache, ndst);
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
}
}
}
@@ -2188,7 +2199,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
old_dst = rcu_dereference_protected(sk->sk_dst_cache,
lockdep_sock_is_held(sk));
rcu_assign_pointer(sk->sk_dst_cache, dst);
@@ -2201,7 +2212,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
dst_release(old_dst);
}
@@ -2239,7 +2250,7 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
}
}
-bool sk_mc_loop(struct sock *sk);
+bool sk_mc_loop(const struct sock *sk);
static inline bool sk_can_gso(const struct sock *sk)
{
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7b1a720691ae..9eb0a2855311 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -720,8 +720,10 @@ static inline void tcp_fast_path_check(struct sock *sk)
tcp_fast_path_on(tp);
}
+u32 tcp_delack_max(const struct sock *sk);
+
/* Compute the actual rto_min value */
-static inline u32 tcp_rto_min(struct sock *sk)
+static inline u32 tcp_rto_min(const struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
u32 rto_min = inet_csk(sk)->icsk_rto_min;
@@ -731,7 +733,7 @@ static inline u32 tcp_rto_min(struct sock *sk)
return rto_min;
}
-static inline u32 tcp_rto_min_us(struct sock *sk)
+static inline u32 tcp_rto_min_us(const struct sock *sk)
{
return jiffies_to_usecs(tcp_rto_min(sk));
}
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 0ca9b7a11baf..21ba0a25f936 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -154,8 +154,9 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb,
- struct net_device *dev, struct in6_addr *saddr,
- struct in6_addr *daddr,
+ struct net_device *dev,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be32 label,
__be16 src_port, __be16 dst_port, bool nocheck);
@@ -174,16 +175,13 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
}
#endif
-static inline void udp_tunnel_encap_enable(struct socket *sock)
+static inline void udp_tunnel_encap_enable(struct sock *sk)
{
- struct udp_sock *up = udp_sk(sock->sk);
-
- if (up->encap_enabled)
+ if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
return;
- up->encap_enabled = 1;
#if IS_ENABLED(CONFIG_IPV6)
- if (sock->sk->sk_family == PF_INET6)
+ if (READ_ONCE(sk->sk_family) == PF_INET6)
ipv6_stub->udpv6_encap_enable();
#endif
udp_encap_enable();
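
udp_tunnel_encap_enable() above becomes an atomic test-and-set on a socket flag word, so concurrent callers cannot both run the one-time enable path. A userspace sketch of the idiom, assuming a flags word and C11 atomics in place of udp_test_and_set_bit():

#include <stdatomic.h>
#include <stdio.h>

#define ENCAP_ENABLED_BIT 0

static atomic_ulong udp_flags;	/* stands in for the socket's flag word */

/* Returns 1 if the bit was already set; sets it otherwise. Only the
 * first caller proceeds to the enable work, whatever the interleaving.
 */
static int test_and_set_bit(int nr, atomic_ulong *addr)
{
	unsigned long mask = 1UL << nr;
	unsigned long old = atomic_fetch_or(addr, mask);

	return (old & mask) != 0;
}

static void encap_enable(void)
{
	if (test_and_set_bit(ENCAP_ENABLED_BIT, &udp_flags))
		return;		/* someone else already enabled it */
	printf("one-time encap enable work runs here\n");
}

int main(void)
{
	encap_enable();	/* does the work */
	encap_enable();	/* no-op */
	return 0;
}
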
diff --git a/include/net/udplite.h b/include/net/udplite.h
index bd33ff2b8f42..786919d29f8d 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -66,14 +66,18 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
/* Fast-path computation of checksum. Socket may not be locked. */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
- const struct udp_sock *up = udp_sk(skb->sk);
const int off = skb_transport_offset(skb);
+ const struct sock *sk = skb->sk;
int len = skb->len - off;
- if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
- if (0 < up->pcslen)
- len = up->pcslen;
- udp_hdr(skb)->len = htons(up->pcslen);
+ if (udp_test_bit(UDPLITE_SEND_CC, sk)) {
+ u16 pcslen = READ_ONCE(udp_sk(sk)->pcslen);
+
+ if (pcslen < len) {
+ if (pcslen > 0)
+ len = pcslen;
+ udp_hdr(skb)->len = htons(pcslen);
+ }
}
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
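
The udplite_csum() rewrite samples pcslen once and clamps the checksum coverage. A small sketch of the coverage rule, simplified from the UDP-Lite semantics shown above (coverage of 0 means the full payload is checksummed; a nonzero coverage smaller than the payload limits how many bytes are):

#include <stdio.h>

/* Mirrors the clamp in udplite_csum(): a nonzero send coverage that is
 * smaller than the payload bounds the checksummed length; otherwise the
 * whole payload is covered.
 */
static int csum_coverage(int payload_len, unsigned short pcslen)
{
	if (pcslen && pcslen < payload_len)
		return pcslen;
	return payload_len;
}

int main(void)
{
	printf("%d\n", csum_coverage(1000, 20));	/* 20: partial coverage */
	printf("%d\n", csum_coverage(1000, 0));		/* 1000: full coverage */
	printf("%d\n", csum_coverage(10, 100));		/* 10: clamp to payload */
	return 0;
}
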
diff --git a/include/net/xdp.h b/include/net/xdp.h
index de08c8e0d134..349c36fb5fd8 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -383,14 +383,25 @@ void xdp_attachment_setup(struct xdp_attachment_info *info,
#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE
+/* Define the relationship between xdp-rx-metadata kfuncs and
+ * various other entities:
+ * - xdp_rx_metadata enum
+ * - netdev netlink enum (Documentation/netlink/specs/netdev.yaml)
+ * - kfunc name
+ * - xdp_metadata_ops field
+ */
#define XDP_METADATA_KFUNC_xxx \
XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \
- bpf_xdp_metadata_rx_timestamp) \
+ NETDEV_XDP_RX_METADATA_TIMESTAMP, \
+ bpf_xdp_metadata_rx_timestamp, \
+ xmo_rx_timestamp) \
XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \
- bpf_xdp_metadata_rx_hash) \
+ NETDEV_XDP_RX_METADATA_HASH, \
+ bpf_xdp_metadata_rx_hash, \
+ xmo_rx_hash) \
-enum {
-#define XDP_METADATA_KFUNC(name, _) name,
+enum xdp_rx_metadata {
+#define XDP_METADATA_KFUNC(name, _, __, ___) name,
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
MAX_XDP_METADATA_KFUNC,
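
The XDP_METADATA_KFUNC_xxx list above is an X-macro: one table now expands into the kfunc enum, the netlink feature enum and the xdp_metadata_ops field name, so the consumers stay in sync by construction. A minimal standalone example of the pattern:

#include <stdio.h>

/* One master list; each consumer redefines ITEM to pick the column it
 * needs, so the enum and the string table can never drift apart.
 */
#define COLOR_LIST			\
	ITEM(COLOR_RED,   "red")	\
	ITEM(COLOR_GREEN, "green")	\
	ITEM(COLOR_BLUE,  "blue")

enum color {
#define ITEM(id, name) id,
	COLOR_LIST
#undef ITEM
	COLOR_MAX,
};

static const char * const color_names[] = {
#define ITEM(id, name) [id] = name,
	COLOR_LIST
#undef ITEM
};

int main(void)
{
	for (enum color c = 0; c < COLOR_MAX; c++)
		printf("%d = %s\n", c, color_names[c]);
	return 0;
}
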
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 1617af380162..69b472604b86 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -14,6 +14,8 @@
#include <linux/mm.h>
#include <net/sock.h>
+#define XDP_UMEM_SG_FLAG (1 << 1)
+
struct net_device;
struct xsk_queue;
struct xdp_buff;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 363c7d510554..98d7aa78adda 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -2166,7 +2166,7 @@ static inline bool xfrm6_local_dontfrag(const struct sock *sk)
proto = sk->sk_protocol;
if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
- return inet6_sk(sk)->dontfrag;
+ return inet6_test_bit(DONTFRAG, sk);
return false;
}
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 563e48617374..09e72215b9f9 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -44,7 +44,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
ssk = mptcp_subflow_tcp_sock(subflow);
if (ssk && sk_fullsock(ssk)) {
__entry->snd_wnd = tcp_sk(ssk)->snd_wnd;
- __entry->pace = ssk->sk_pacing_rate;
+ __entry->pace = READ_ONCE(ssk->sk_pacing_rate);
} else {
__entry->snd_wnd = 0;
__entry->pace = 0;
diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h
index d0b3f0ea9ba1..f1ebe36787c3 100644
--- a/include/trace/events/vsock_virtio_transport_common.h
+++ b/include/trace/events/vsock_virtio_transport_common.h
@@ -43,7 +43,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__u32 len,
__u16 type,
__u16 op,
- __u32 flags
+ __u32 flags,
+ bool zcopy
),
TP_ARGS(
src_cid, src_port,
@@ -51,7 +52,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
len,
type,
op,
- flags
+ flags,
+ zcopy
),
TP_STRUCT__entry(
__field(__u32, src_cid)
@@ -62,6 +64,7 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__field(__u16, type)
__field(__u16, op)
__field(__u32, flags)
+ __field(bool, zcopy)
),
TP_fast_assign(
__entry->src_cid = src_cid;
@@ -72,14 +75,15 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__entry->type = type;
__entry->op = op;
__entry->flags = flags;
+ __entry->zcopy = zcopy;
),
- TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x",
+ TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x zcopy=%s",
__entry->src_cid, __entry->src_port,
__entry->dst_cid, __entry->dst_port,
__entry->len,
show_type(__entry->type),
show_op(__entry->op),
- __entry->flags)
+ __entry->flags, __entry->zcopy ? "true" : "false")
);
TRACE_EVENT(virtio_transport_recv_pkt,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0448700890f7..5f13db15a3c7 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -932,7 +932,14 @@ enum bpf_map_type {
*/
BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
- BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
+ /* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
+ * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
+ * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
+ * functionality and more. So mark BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
+ * deprecated.
+ */
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 03875e078be8..cd4b82458d1b 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -680,6 +680,7 @@ enum devlink_port_function_attr {
DEVLINK_PORT_FN_ATTR_STATE, /* u8 */
DEVLINK_PORT_FN_ATTR_OPSTATE, /* u8 */
DEVLINK_PORT_FN_ATTR_CAPS, /* bitfield32 */
+ DEVLINK_PORT_FN_ATTR_DEVLINK, /* nested */
__DEVLINK_PORT_FUNCTION_ATTR_MAX,
DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
new file mode 100644
index 000000000000..20ef0718f8dc
--- /dev/null
+++ b/include/uapi/linux/dpll.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/dpll.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_DPLL_H
+#define _UAPI_LINUX_DPLL_H
+
+#define DPLL_FAMILY_NAME "dpll"
+#define DPLL_FAMILY_VERSION 1
+
+/**
+ * enum dpll_mode - working modes a dpll can support, differentiates whether
+ * and how the dpll selects one of its inputs to syntonize with, valid values
+ * for DPLL_A_MODE attribute
+ * @DPLL_MODE_MANUAL: input can only be selected by sending a request to the dpll
+ * @DPLL_MODE_AUTOMATIC: highest-prio input pin auto-selected by the dpll
+ */
+enum dpll_mode {
+ DPLL_MODE_MANUAL = 1,
+ DPLL_MODE_AUTOMATIC,
+
+ /* private: */
+ __DPLL_MODE_MAX,
+ DPLL_MODE_MAX = (__DPLL_MODE_MAX - 1)
+};
+
+/**
+ * enum dpll_lock_status - provides information of dpll device lock status,
+ * valid values for DPLL_A_LOCK_STATUS attribute
+ * @DPLL_LOCK_STATUS_UNLOCKED: dpll was not yet locked to any valid input (or
+ * forced by setting DPLL_A_MODE to DPLL_MODE_DETACHED)
+ * @DPLL_LOCK_STATUS_LOCKED: dpll is locked to a valid signal, but no holdover
+ * available
+ * @DPLL_LOCK_STATUS_LOCKED_HO_ACQ: dpll is locked and holdover acquired
+ * @DPLL_LOCK_STATUS_HOLDOVER: dpll is in holdover state - it lost a valid
+ * lock or was forced into holdover by disconnecting all the pins (the latter
+ * is possible only when the dpll's lock-state was already
+ * DPLL_LOCK_STATUS_LOCKED_HO_ACQ; otherwise the dpll's lock-state shall
+ * remain DPLL_LOCK_STATUS_UNLOCKED)
+ */
+enum dpll_lock_status {
+ DPLL_LOCK_STATUS_UNLOCKED = 1,
+ DPLL_LOCK_STATUS_LOCKED,
+ DPLL_LOCK_STATUS_LOCKED_HO_ACQ,
+ DPLL_LOCK_STATUS_HOLDOVER,
+
+ /* private: */
+ __DPLL_LOCK_STATUS_MAX,
+ DPLL_LOCK_STATUS_MAX = (__DPLL_LOCK_STATUS_MAX - 1)
+};
+
+#define DPLL_TEMP_DIVIDER 1000
+
+/**
+ * enum dpll_type - type of dpll, valid values for DPLL_A_TYPE attribute
+ * @DPLL_TYPE_PPS: dpll produces Pulse-Per-Second signal
+ * @DPLL_TYPE_EEC: dpll drives the Ethernet Equipment Clock
+ */
+enum dpll_type {
+ DPLL_TYPE_PPS = 1,
+ DPLL_TYPE_EEC,
+
+ /* private: */
+ __DPLL_TYPE_MAX,
+ DPLL_TYPE_MAX = (__DPLL_TYPE_MAX - 1)
+};
+
+/**
+ * enum dpll_pin_type - defines possible types of a pin, valid values for
+ * DPLL_A_PIN_TYPE attribute
+ * @DPLL_PIN_TYPE_MUX: aggregates another layer of selectable pins
+ * @DPLL_PIN_TYPE_EXT: external input
+ * @DPLL_PIN_TYPE_SYNCE_ETH_PORT: ethernet port PHY's recovered clock
+ * @DPLL_PIN_TYPE_INT_OSCILLATOR: device internal oscillator
+ * @DPLL_PIN_TYPE_GNSS: GNSS recovered clock
+ */
+enum dpll_pin_type {
+ DPLL_PIN_TYPE_MUX = 1,
+ DPLL_PIN_TYPE_EXT,
+ DPLL_PIN_TYPE_SYNCE_ETH_PORT,
+ DPLL_PIN_TYPE_INT_OSCILLATOR,
+ DPLL_PIN_TYPE_GNSS,
+
+ /* private: */
+ __DPLL_PIN_TYPE_MAX,
+ DPLL_PIN_TYPE_MAX = (__DPLL_PIN_TYPE_MAX - 1)
+};
+
+/**
+ * enum dpll_pin_direction - defines possible direction of a pin, valid values
+ * for DPLL_A_PIN_DIRECTION attribute
+ * @DPLL_PIN_DIRECTION_INPUT: pin used as an input of a signal
+ * @DPLL_PIN_DIRECTION_OUTPUT: pin used to output the signal
+ */
+enum dpll_pin_direction {
+ DPLL_PIN_DIRECTION_INPUT = 1,
+ DPLL_PIN_DIRECTION_OUTPUT,
+
+ /* private: */
+ __DPLL_PIN_DIRECTION_MAX,
+ DPLL_PIN_DIRECTION_MAX = (__DPLL_PIN_DIRECTION_MAX - 1)
+};
+
+#define DPLL_PIN_FREQUENCY_1_HZ 1
+#define DPLL_PIN_FREQUENCY_10_KHZ 10000
+#define DPLL_PIN_FREQUENCY_77_5_KHZ 77500
+#define DPLL_PIN_FREQUENCY_10_MHZ 10000000
+
+/**
+ * enum dpll_pin_state - defines possible states of a pin, valid values for
+ * DPLL_A_PIN_STATE attribute
+ * @DPLL_PIN_STATE_CONNECTED: pin connected, active input of phase locked loop
+ * @DPLL_PIN_STATE_DISCONNECTED: pin disconnected, not considered as a valid
+ * input
+ * @DPLL_PIN_STATE_SELECTABLE: pin enabled for automatic input selection
+ */
+enum dpll_pin_state {
+ DPLL_PIN_STATE_CONNECTED = 1,
+ DPLL_PIN_STATE_DISCONNECTED,
+ DPLL_PIN_STATE_SELECTABLE,
+
+ /* private: */
+ __DPLL_PIN_STATE_MAX,
+ DPLL_PIN_STATE_MAX = (__DPLL_PIN_STATE_MAX - 1)
+};
+
+/**
+ * enum dpll_pin_capabilities - defines possible capabilities of a pin, valid
+ * flags on DPLL_A_PIN_CAPABILITIES attribute
+ * @DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE: pin direction can be changed
+ * @DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE: pin priority can be changed
+ * @DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE: pin state can be changed
+ */
+enum dpll_pin_capabilities {
+ DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE = 1,
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE = 2,
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE = 4,
+};
+
+enum dpll_a {
+ DPLL_A_ID = 1,
+ DPLL_A_MODULE_NAME,
+ DPLL_A_PAD,
+ DPLL_A_CLOCK_ID,
+ DPLL_A_MODE,
+ DPLL_A_MODE_SUPPORTED,
+ DPLL_A_LOCK_STATUS,
+ DPLL_A_TEMP,
+ DPLL_A_TYPE,
+
+ __DPLL_A_MAX,
+ DPLL_A_MAX = (__DPLL_A_MAX - 1)
+};
+
+enum dpll_a_pin {
+ DPLL_A_PIN_ID = 1,
+ DPLL_A_PIN_PARENT_ID,
+ DPLL_A_PIN_MODULE_NAME,
+ DPLL_A_PIN_PAD,
+ DPLL_A_PIN_CLOCK_ID,
+ DPLL_A_PIN_BOARD_LABEL,
+ DPLL_A_PIN_PANEL_LABEL,
+ DPLL_A_PIN_PACKAGE_LABEL,
+ DPLL_A_PIN_TYPE,
+ DPLL_A_PIN_DIRECTION,
+ DPLL_A_PIN_FREQUENCY,
+ DPLL_A_PIN_FREQUENCY_SUPPORTED,
+ DPLL_A_PIN_FREQUENCY_MIN,
+ DPLL_A_PIN_FREQUENCY_MAX,
+ DPLL_A_PIN_PRIO,
+ DPLL_A_PIN_STATE,
+ DPLL_A_PIN_CAPABILITIES,
+ DPLL_A_PIN_PARENT_DEVICE,
+ DPLL_A_PIN_PARENT_PIN,
+
+ __DPLL_A_PIN_MAX,
+ DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1)
+};
+
+enum dpll_cmd {
+ DPLL_CMD_DEVICE_ID_GET = 1,
+ DPLL_CMD_DEVICE_GET,
+ DPLL_CMD_DEVICE_SET,
+ DPLL_CMD_DEVICE_CREATE_NTF,
+ DPLL_CMD_DEVICE_DELETE_NTF,
+ DPLL_CMD_DEVICE_CHANGE_NTF,
+ DPLL_CMD_PIN_ID_GET,
+ DPLL_CMD_PIN_GET,
+ DPLL_CMD_PIN_SET,
+ DPLL_CMD_PIN_CREATE_NTF,
+ DPLL_CMD_PIN_DELETE_NTF,
+ DPLL_CMD_PIN_CHANGE_NTF,
+
+ __DPLL_CMD_MAX,
+ DPLL_CMD_MAX = (__DPLL_CMD_MAX - 1)
+};
+
+#define DPLL_MCGRP_MONITOR "monitor"
+
+#endif /* _UAPI_LINUX_DPLL_H */
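
DPLL_TEMP_DIVIDER above suggests the DPLL_A_TEMP attribute carries a temperature pre-scaled by 1000 (i.e. millidegrees). A hedged decoding sketch; the attribute's exact width and signedness are assumptions here, not something this header spells out:

#include <stdio.h>

#define DPLL_TEMP_DIVIDER 1000	/* from the header above */

/* Assumption: the raw value is an integer temperature multiplied by
 * DPLL_TEMP_DIVIDER, so 45500 means 45.5 degrees. Print it without
 * floating point, the way hwmon-style values are usually shown.
 */
static void print_temp(long long raw)
{
	printf("%lld.%03lld C\n", raw / DPLL_TEMP_DIVIDER,
	       (raw < 0 ? -raw : raw) % DPLL_TEMP_DIVIDER);
}

int main(void)
{
	print_temp(45500);	/* 45.500 C */
	print_temp(-1250);	/* -1.250 C */
	return 0;
}
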
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index ce3117df9cec..fac351a93aed 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -376,7 +376,7 @@ enum {
IFLA_GSO_IPV4_MAX_SIZE,
IFLA_GRO_IPV4_MAX_SIZE,
-
+ IFLA_DPLL_PIN,
__IFLA_MAX
};
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index c1634b95c223..2943a151d4f1 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -38,11 +38,27 @@ enum netdev_xdp_act {
NETDEV_XDP_ACT_MASK = 127,
};
+/**
+ * enum netdev_xdp_rx_metadata
+ * @NETDEV_XDP_RX_METADATA_TIMESTAMP: Device is capable of exposing receive HW
+ * timestamp via bpf_xdp_metadata_rx_timestamp().
+ * @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet
+ * hash via bpf_xdp_metadata_rx_hash().
+ */
+enum netdev_xdp_rx_metadata {
+ NETDEV_XDP_RX_METADATA_TIMESTAMP = 1,
+ NETDEV_XDP_RX_METADATA_HASH = 2,
+
+ /* private: */
+ NETDEV_XDP_RX_METADATA_MASK = 3,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
NETDEV_A_DEV_XDP_FEATURES,
NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
+ NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
__NETDEV_A_DEV_MAX,
NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 3f85ae578056..f762a10bfb78 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -941,15 +941,22 @@ enum {
TCA_FQ_HORIZON_DROP, /* drop packets beyond horizon, or cap their EDT */
+ TCA_FQ_PRIOMAP, /* prio2band */
+
+ TCA_FQ_WEIGHTS, /* Weights for each band */
+
__TCA_FQ_MAX
};
#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
+#define FQ_BANDS 3
+#define FQ_MIN_WEIGHT 16384
+
struct tc_fq_qd_stats {
__u64 gc_flows;
- __u64 highprio_packets;
- __u64 tcp_retrans;
+ __u64 highprio_packets; /* obsolete */
+ __u64 tcp_retrans; /* obsolete */
__u64 throttled;
__u64 flows_plimit;
__u64 pkts_too_long;
@@ -962,6 +969,10 @@ struct tc_fq_qd_stats {
__u64 ce_mark; /* packets above ce_threshold */
__u64 horizon_drops;
__u64 horizon_caps;
+ __u64 fastpath_packets;
+ __u64 band_drops[FQ_BANDS];
+ __u32 band_pkt_count[FQ_BANDS];
+ __u32 pad;
};
/* Heavy-Hitter Filter */
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 879eeb0a084b..d1d08da6331a 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -289,6 +289,18 @@ struct tcp_info {
*/
__u32 tcpi_rehash; /* PLB or timeout triggered rehash attempts */
+
+ __u16 tcpi_total_rto; /* Total number of RTO timeouts, including
+ * SYN/SYN-ACK and recurring timeouts.
+ */
+ __u16 tcpi_total_rto_recoveries; /* Total number of RTO
+ * recoveries, including any
+ * unfinished recovery.
+ */
+ __u32 tcpi_total_rto_time; /* Total time spent in RTO recoveries
+ * in milliseconds, including any
+ * unfinished recovery.
+ */
};
/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index fdc3e8705a3c..db6176fb64dc 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -615,7 +615,10 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map)
if (st_map->links)
bpf_struct_ops_map_put_progs(st_map);
bpf_map_area_free(st_map->links);
- bpf_jit_free_exec(st_map->image);
+ if (st_map->image) {
+ bpf_jit_free_exec(st_map->image);
+ bpf_jit_uncharge_modmem(PAGE_SIZE);
+ }
bpf_map_area_free(st_map->uvalue);
bpf_map_area_free(st_map);
}
@@ -657,6 +660,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
struct bpf_struct_ops_map *st_map;
const struct btf_type *t, *vt;
struct bpf_map *map;
+ int ret;
st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
if (!st_ops)
@@ -681,12 +685,27 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
st_map->st_ops = st_ops;
map = &st_map->map;
+ ret = bpf_jit_charge_modmem(PAGE_SIZE);
+ if (ret) {
+ __bpf_struct_ops_map_free(map);
+ return ERR_PTR(ret);
+ }
+
+ st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
+ if (!st_map->image) {
+ /* __bpf_struct_ops_map_free() uses st_map->image as a flag
+ * for "charged or not". In this case, we need to uncharge
+ * here.
+ */
+ bpf_jit_uncharge_modmem(PAGE_SIZE);
+ __bpf_struct_ops_map_free(map);
+ return ERR_PTR(-ENOMEM);
+ }
st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
st_map->links =
bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_links *),
NUMA_NO_NODE);
- st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
- if (!st_map->uvalue || !st_map->links || !st_map->image) {
+ if (!st_map->uvalue || !st_map->links) {
__bpf_struct_ops_map_free(map);
return ERR_PTR(-ENOMEM);
}
@@ -907,4 +926,3 @@ err_out:
kfree(link);
return err;
}
-
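
The struct_ops fix above makes st_map->image double as the "modmem charged" flag: the charge happens right before the allocation, the shared free path uncharges only when the pointer is non-NULL, and the one early-error path must uncharge by hand. A standalone sketch of that ownership pattern, with hypothetical charge/uncharge stand-ins:

#include <stdio.h>
#include <stdlib.h>

static long charged;	/* stand-in for the modmem accounting counter */

static int charge(size_t n)    { charged += n; return 0; }
static void uncharge(size_t n) { charged -= n; }

struct obj {
	void *image;	/* non-NULL also means "charge is held" */
};

/* Shared free path: uncharges only if the allocation (and thus the
 * charge) is actually held, mirroring __bpf_struct_ops_map_free().
 */
static void obj_free(struct obj *o)
{
	if (!o)
		return;
	if (o->image) {
		free(o->image);
		uncharge(4096);
	}
	free(o);
}

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	if (charge(4096)) {
		obj_free(o);		/* image == NULL: nothing to uncharge */
		return NULL;
	}
	o->image = malloc(4096);
	if (!o->image) {
		uncharge(4096);		/* charged, but image stayed NULL */
		obj_free(o);
		return NULL;
	}
	return o;
}

int main(void)
{
	struct obj *o = obj_alloc();

	obj_free(o);
	printf("balance=%ld\n", charged);	/* 0: charges balanced */
	return 0;
}
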
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 8090d7fb11ef..69101200c124 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3293,6 +3293,8 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
type = BPF_KPTR_UNREF;
else if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off)))
type = BPF_KPTR_REF;
+ else if (!strcmp("percpu_kptr", __btf_name_by_offset(btf, t->name_off)))
+ type = BPF_KPTR_PERCPU;
else
return -EINVAL;
@@ -3308,10 +3310,10 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
return BTF_FIELD_FOUND;
}
-static const char *btf_find_decl_tag_value(const struct btf *btf,
- const struct btf_type *pt,
- int comp_idx, const char *tag_key)
+const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key)
{
+ const char *value = NULL;
int i;
for (i = 1; i < btf_nr_types(btf); i++) {
@@ -3325,9 +3327,14 @@ static const char *btf_find_decl_tag_value(const struct btf *btf,
continue;
if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
continue;
- return __btf_name_by_offset(btf, t->name_off) + len;
+ /* Prevent duplicate entries for the same type */
+ if (value)
+ return ERR_PTR(-EEXIST);
+ value = __btf_name_by_offset(btf, t->name_off) + len;
}
- return NULL;
+ if (!value)
+ return ERR_PTR(-ENOENT);
+ return value;
}
static int
@@ -3345,7 +3352,7 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
if (t->size != sz)
return BTF_FIELD_IGNORE;
value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
- if (!value_type)
+ if (IS_ERR(value_type))
return -EINVAL;
node_field_name = strstr(value_type, ":");
if (!node_field_name)
@@ -3457,6 +3464,7 @@ static int btf_find_struct_field(const struct btf *btf,
break;
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
ret = btf_find_kptr(btf, member_type, off, sz,
idx < info_cnt ? &info[idx] : &tmp);
if (ret < 0)
@@ -3523,6 +3531,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
break;
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
ret = btf_find_kptr(btf, var_type, off, sz,
idx < info_cnt ? &info[idx] : &tmp);
if (ret < 0)
@@ -3783,6 +3792,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
break;
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
if (ret < 0)
goto end;
@@ -6949,7 +6959,7 @@ int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
* (either PTR_TO_CTX or SCALAR_VALUE).
*/
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs)
+ struct bpf_reg_state *regs, bool is_ex_cb)
{
struct bpf_verifier_log *log = &env->log;
struct bpf_prog *prog = env->prog;
@@ -7006,7 +7016,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
tname, nargs, MAX_BPF_FUNC_REG_ARGS);
return -EINVAL;
}
- /* check that function returns int */
+ /* check that function returns int, exception cb also requires this */
t = btf_type_by_id(btf, t->type);
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
@@ -7055,6 +7065,14 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
i, btf_type_str(t), tname);
return -EINVAL;
}
+ /* We have already ensured that the callback returns an integer, just
+ * like all global subprogs. We only need to ensure that it has a single
+ * scalar argument.
+ */
+ if (is_ex_cb && (nargs != 1 || regs[BPF_REG_1].type != SCALAR_VALUE)) {
+ bpf_log(log, "exception cb only supports single integer argument\n");
+ return -EINVAL;
+ }
return 0;
}
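
btf_find_decl_tag_value() above switches from "NULL on failure" to the kernel's ERR_PTR convention so callers can tell "no tag" (-ENOENT) from "duplicate tags" (-EEXIST). A self-contained model of that idiom, with userspace stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR():

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ENOENT 2
#define EEXIST 17

/* Userspace stand-ins: error codes ride in the top page of the
 * address space, where no valid pointer can live.
 */
static void *ERR_PTR(long err)     { return (void *)(intptr_t)err; }
static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static int IS_ERR(const void *p)   { return (uintptr_t)p >= (uintptr_t)-4095; }

/* Find a unique string with the given prefix; -ENOENT if absent,
 * -EEXIST if it appears more than once, like the btf.c change above.
 */
static const char *find_unique(const char *const *tbl, int n, const char *pfx)
{
	const char *value = NULL;
	size_t len = strlen(pfx);

	for (int i = 0; i < n; i++) {
		if (strncmp(tbl[i], pfx, len))
			continue;
		if (value)
			return ERR_PTR(-EEXIST);
		value = tbl[i] + len;
	}
	return value ? value : ERR_PTR(-ENOENT);
}

int main(void)
{
	const char *tags[] = { "contains:foo", "other:bar" };
	const char *v = find_unique(tags, 2, "contains:");

	if (IS_ERR(v))
		printf("error %ld\n", PTR_ERR(v));
	else
		printf("value %s\n", v);	/* prints "value foo" */
	return 0;
}
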
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 4e3ce0542e31..08626b519ce2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -64,8 +64,8 @@
#define OFF insn->off
#define IMM insn->imm
-struct bpf_mem_alloc bpf_global_ma;
-bool bpf_global_ma_set;
+struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
+bool bpf_global_ma_set, bpf_global_percpu_ma_set;
/* No hurry in this branch
*
@@ -212,7 +212,7 @@ void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
const struct bpf_line_info *linfo;
void **jited_linfo;
- if (!prog->aux->jited_linfo)
+ if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
/* Userspace did not provide linfo */
return;
@@ -539,7 +539,7 @@ static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
int i;
- for (i = 0; i < fp->aux->func_cnt; i++)
+ for (i = 0; i < fp->aux->real_func_cnt; i++)
bpf_prog_kallsyms_del(fp->aux->func[i]);
}
@@ -589,7 +589,7 @@ bpf_prog_ksym_set_name(struct bpf_prog *prog)
sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
/* prog->aux->name will be ignored if full btf name is available */
- if (prog->aux->func_info_cnt) {
+ if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
type = btf_type_by_id(prog->aux->btf,
prog->aux->func_info[prog->aux->func_idx].type_id);
func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
@@ -623,7 +623,11 @@ static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
if (val < ksym->start)
return -1;
- if (val >= ksym->end)
+ /* Ensure that we detect return addresses as part of the program, for
+ * the case where a program's final instruction is a call and the
+ * program appears in a stack trace. Therefore, do val > ksym->end
+ * instead of val >= ksym->end.
+ */
+ if (val > ksym->end)
return 1;
return 0;
@@ -733,7 +737,7 @@ bool is_bpf_text_address(unsigned long addr)
return ret;
}
-static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
+struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
struct bpf_ksym *ksym = bpf_ksym_find(addr);
@@ -1208,7 +1212,7 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
if (!extra_pass)
addr = NULL;
else if (prog->aux->func &&
- off >= 0 && off < prog->aux->func_cnt)
+ off >= 0 && off < prog->aux->real_func_cnt)
addr = (u8 *)prog->aux->func[off]->bpf_func;
else
return -EINVAL;
@@ -2721,7 +2725,7 @@ static void bpf_prog_free_deferred(struct work_struct *work)
#endif
if (aux->dst_trampoline)
bpf_trampoline_put(aux->dst_trampoline);
- for (i = 0; i < aux->func_cnt; i++) {
+ for (i = 0; i < aux->real_func_cnt; i++) {
/* We can just unlink the subprog poke descriptor table as
* it was originally linked to the main program and is also
* released along with it.
@@ -2729,7 +2733,7 @@ static void bpf_prog_free_deferred(struct work_struct *work)
aux->func[i]->aux->poke_tab = NULL;
bpf_jit_free(aux->func[i]);
}
- if (aux->func_cnt) {
+ if (aux->real_func_cnt) {
kfree(aux->func);
bpf_prog_unlock_free(aux->prog);
} else {
@@ -2914,6 +2918,15 @@ int __weak bpf_arch_text_invalidate(void *dst, size_t len)
return -ENOTSUPP;
}
+bool __weak bpf_jit_supports_exceptions(void)
+{
+ return false;
+}
+
+void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
+{
+}
+
#ifdef CONFIG_BPF_SYSCALL
static int __init bpf_global_ma_init(void)
{
@@ -2921,7 +2934,9 @@ static int __init bpf_global_ma_init(void)
ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
bpf_global_ma_set = !ret;
- return ret;
+ ret = bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true);
+ bpf_global_percpu_ma_set = !ret;
+ return !bpf_global_ma_set || !bpf_global_percpu_ma_set;
}
late_initcall(bpf_global_ma_init);
#endif
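
The comparator change above widens a program's address range to include ksym->end: when a program's last instruction is a call, the recorded return address is the address one past it, and stack unwinding must still attribute that address to the program. A toy model of the two comparators:

#include <stdio.h>

struct ksym { unsigned long start, end; };

/* Old rule: [start, end) - a return address equal to 'end' misses. */
static int in_range_old(unsigned long val, const struct ksym *ks)
{
	return val >= ks->start && val < ks->end;
}

/* New rule: [start, end] - the one-past-the-call return address of a
 * program whose final insn is a call still maps to the program.
 */
static int in_range_new(unsigned long val, const struct ksym *ks)
{
	return val >= ks->start && val <= ks->end;
}

int main(void)
{
	struct ksym prog = { .start = 0x1000, .end = 0x1100 };
	unsigned long ret_addr = 0x1100;	/* call as final insn */

	printf("old=%d new=%d\n", in_range_old(ret_addr, &prog),
	       in_range_new(ret_addr, &prog));	/* old=0 new=1 */
	return 0;
}
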
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 8bd3812fb8df..dd1c69ee3375 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -22,6 +22,7 @@
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
+#include <linux/kasan.h>
#include "../../lib/kstrtox.h"
@@ -1902,6 +1903,14 @@ __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
return p;
}
+__bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
+{
+ u64 size = local_type_id__k;
+
+ /* The verifier has ensured that meta__ign must be NULL */
+ return bpf_mem_alloc(&bpf_global_percpu_ma, size);
+}
+
/* Must be called under migrate_disable(), as required by bpf_mem_free */
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec)
{
@@ -1930,6 +1939,12 @@ __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
__bpf_obj_drop_impl(p, meta ? meta->record : NULL);
}
+__bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
+{
+ /* The verifier has ensured that meta__ign must be NULL */
+ bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
+}
+
__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
{
struct btf_struct_meta *meta = meta__ign;
@@ -2435,6 +2450,49 @@ __bpf_kfunc void bpf_rcu_read_unlock(void)
rcu_read_unlock();
}
+struct bpf_throw_ctx {
+ struct bpf_prog_aux *aux;
+ u64 sp;
+ u64 bp;
+ int cnt;
+};
+
+static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
+{
+ struct bpf_throw_ctx *ctx = cookie;
+ struct bpf_prog *prog;
+
+ if (!is_bpf_text_address(ip))
+ return !ctx->cnt;
+ prog = bpf_prog_ksym_find(ip);
+ ctx->cnt++;
+ if (bpf_is_subprog(prog))
+ return true;
+ ctx->aux = prog->aux;
+ ctx->sp = sp;
+ ctx->bp = bp;
+ return false;
+}
+
+__bpf_kfunc void bpf_throw(u64 cookie)
+{
+ struct bpf_throw_ctx ctx = {};
+
+ arch_bpf_stack_walk(bpf_stack_walker, &ctx);
+ WARN_ON_ONCE(!ctx.aux);
+ if (ctx.aux)
+ WARN_ON_ONCE(!ctx.aux->exception_boundary);
+ WARN_ON_ONCE(!ctx.bp);
+ WARN_ON_ONCE(!ctx.cnt);
+ /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
+ * deeper stack depths than ctx.sp as we do not return from bpf_throw,
+ * which skips the compiler-generated instrumentation that would do the same.
+ */
+ kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
+ ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp);
+ WARN(1, "A call to BPF exception callback should never return\n");
+}
+
__diag_pop();
BTF_SET8_START(generic_btf_ids)
@@ -2442,7 +2500,9 @@ BTF_SET8_START(generic_btf_ids)
BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_push_front_impl)
BTF_ID_FLAGS(func, bpf_list_push_back_impl)
@@ -2462,6 +2522,7 @@ BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_throw)
BTF_SET8_END(generic_btf_ids)
static const struct btf_kfunc_id_set generic_kfunc_set = {
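
For BPF program authors, the bpf_throw() kfunc and the "exception_callback:" decl tag added above combine roughly as below. A hypothetical BPF-C sketch: the __exception_cb wrapper macro and the section name are assumptions modeled on selftest conventions, while the tag string, the global linkage and the single-u64-argument shape of the callback are what the verifier changes in this patch enforce:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* bpf_throw() is a kfunc; declare it as an extern ksym. */
extern void bpf_throw(u64 cookie) __ksym;

/* Assumed helper macro: ties the main program to its exception
 * callback via the "exception_callback:<name>" BTF decl tag that
 * bpf_find_exception_callback_insn_off() looks up.
 */
#define __exception_cb(name) \
	__attribute__((btf_decl_tag("exception_callback:" #name)))

/* Must be a global subprog taking a single scalar and returning int,
 * as enforced by btf_prepare_func_args() above.
 */
__noinline int exc_cb(u64 cookie)
{
	bpf_printk("unwound with cookie %llu", cookie);
	return -1;	/* becomes the program's return value */
}

SEC("tc")
__exception_cb(exc_cb)
int throwing_prog(struct __sk_buff *ctx)
{
	if (ctx->len > 1500)
		bpf_throw(42);	/* never returns; frames are unwound */
	return 0;
}

char _license[] SEC("license") = "GPL";
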
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index d93ddac283d4..39ea316c55e7 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -526,15 +526,16 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
struct bpf_mem_cache *c, __percpu *pc;
struct obj_cgroup *objcg = NULL;
+ /* room for llist_node and per-cpu pointer */
+ if (percpu)
+ percpu_size = LLIST_NODE_SZ + sizeof(void *);
+
if (size) {
pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
if (!pc)
return -ENOMEM;
- if (percpu)
- /* room for llist_node and per-cpu pointer */
- percpu_size = LLIST_NODE_SZ + sizeof(void *);
- else
+ if (!percpu)
size += LLIST_NODE_SZ; /* room for llist_node */
unit_size = size;
@@ -555,10 +556,6 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
return 0;
}
- /* size == 0 && percpu is an invalid combination */
- if (WARN_ON_ONCE(percpu))
- return -EINVAL;
-
pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
if (!pcc)
return -ENOMEM;
@@ -572,6 +569,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
c = &cc->cache[i];
c->unit_size = sizes[i];
c->objcg = objcg;
+ c->percpu_size = percpu_size;
c->tgt = c;
init_refill_work(c);
@@ -782,12 +780,17 @@ static void notrace *unit_alloc(struct bpf_mem_cache *c)
}
}
local_dec(&c->active);
- local_irq_restore(flags);
WARN_ON(cnt < 0);
if (cnt < c->low_watermark)
irq_work_raise(c);
+ /* Enable IRQ after the enqueue of irq work completes, so irq work
+ * will run after IRQ is enabled and free_llist may be refilled by
+ * irq work before another task preempts the current task.
+ */
+ local_irq_restore(flags);
+
return llnode;
}
@@ -823,11 +826,16 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
llist_add(llnode, &c->free_llist_extra);
}
local_dec(&c->active);
- local_irq_restore(flags);
if (cnt > c->high_watermark)
/* free few objects from current cpu into global kmalloc pool */
irq_work_raise(c);
+ /* Enable IRQ after irq_work_raise() completes, otherwise when the
+ * current task is preempted by a task which does unit_alloc(),
+ * unit_alloc() may return NULL unexpectedly because the irq work is
+ * already pending but cannot be triggered and free_llist cannot be
+ * refilled in time.
+ */
+ local_irq_restore(flags);
}
static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
@@ -845,10 +853,10 @@ static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
llist_add(llnode, &c->free_llist_extra_rcu);
}
local_dec(&c->active);
- local_irq_restore(flags);
if (!atomic_read(&c->call_rcu_in_progress))
irq_work_raise(c);
+ local_irq_restore(flags);
}
/* Called from BPF program or from sys_bpf syscall.
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 87d6693d8233..1a4fec330eaa 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -234,7 +234,14 @@ int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
attr->prog_type != BPF_PROG_TYPE_XDP)
return -EINVAL;
- if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
+ if (attr->prog_flags & ~(BPF_F_XDP_DEV_BOUND_ONLY | BPF_F_XDP_HAS_FRAGS))
+ return -EINVAL;
+
+ /* Frags are allowed only if the program is dev-bound-only, but not
+ * if it is requesting bpf offload.
+ */
+ if (attr->prog_flags & BPF_F_XDP_HAS_FRAGS &&
+ !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY))
return -EINVAL;
if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
@@ -847,10 +854,11 @@ void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
if (!ops)
goto out;
- if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
- p = ops->xmo_rx_timestamp;
- else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
- p = ops->xmo_rx_hash;
+#define XDP_METADATA_KFUNC(name, _, __, xmo) \
+ if (func_id == bpf_xdp_metadata_kfunc_id(name)) p = ops->xmo;
+ XDP_METADATA_KFUNC_xxx
+#undef XDP_METADATA_KFUNC
+
out:
up_read(&bpf_devs_lock);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index eb01c31ed591..85c1d908f70f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -514,6 +514,7 @@ void btf_record_free(struct btf_record *rec)
switch (rec->fields[i].type) {
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
if (rec->fields[i].kptr.module)
module_put(rec->fields[i].kptr.module);
btf_put(rec->fields[i].kptr.btf);
@@ -560,6 +561,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
switch (fields[i].type) {
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
btf_get(fields[i].kptr.btf);
if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
ret = -ENXIO;
@@ -650,6 +652,7 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
WRITE_ONCE(*(u64 *)field_ptr, 0);
break;
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
if (!xchgd_field)
break;
@@ -1045,6 +1048,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
break;
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
case BPF_REFCOUNT:
if (map->map_type != BPF_MAP_TYPE_HASH &&
map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
@@ -2745,7 +2749,7 @@ free_used_maps:
* period before we can tear down JIT memory since symbols
* are already exposed under kallsyms.
*/
- __bpf_prog_put_noref(prog, prog->aux->func_cnt);
+ __bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
return err;
free_prog_sec:
free_uid(prog->aux->user);
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index c4ab9d6cdbe9..7473068ed313 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -35,16 +35,13 @@ static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_comm
u32 *tid,
bool skip_if_dup_files)
{
- struct task_struct *task, *next_task;
+ struct task_struct *task;
struct pid *pid;
- u32 saved_tid;
+ u32 next_tid;
if (!*tid) {
/* The first time, the iterator calls this function. */
pid = find_pid_ns(common->pid, common->ns);
- if (!pid)
- return NULL;
-
task = get_pid_task(pid, PIDTYPE_TGID);
if (!task)
return NULL;
@@ -66,44 +63,27 @@ static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_comm
return task;
}
- pid = find_pid_ns(common->pid_visiting, common->ns);
- if (!pid)
- return NULL;
-
- task = get_pid_task(pid, PIDTYPE_PID);
+ task = find_task_by_pid_ns(common->pid_visiting, common->ns);
if (!task)
return NULL;
retry:
- if (!pid_alive(task)) {
- put_task_struct(task);
- return NULL;
- }
-
- next_task = next_thread(task);
- put_task_struct(task);
- if (!next_task)
- return NULL;
+ task = next_thread(task);
- saved_tid = *tid;
- *tid = __task_pid_nr_ns(next_task, PIDTYPE_PID, common->ns);
- if (!*tid || *tid == common->pid) {
+ next_tid = __task_pid_nr_ns(task, PIDTYPE_PID, common->ns);
+ if (!next_tid || next_tid == common->pid) {
/* Run out of tasks of a process. The tasks of a
* thread_group are linked as a circular linked list.
*/
- *tid = saved_tid;
return NULL;
}
- get_task_struct(next_task);
- common->pid_visiting = *tid;
-
- if (skip_if_dup_files && task->files == task->group_leader->files) {
- task = next_task;
+ if (skip_if_dup_files && task->files == task->group_leader->files)
goto retry;
- }
- return next_task;
+ *tid = common->pid_visiting = next_tid;
+ get_task_struct(task);
+ return task;
}
static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *common,
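
task_group_seq_get_next() above now walks the thread group with next_thread() alone, relying on the fact that a thread group's tasks form a circular list: iteration stops when the walk returns to the leader. A toy model of that termination rule:

#include <stdio.h>

struct task {
	int tid;
	struct task *next;	/* circularly linked thread list */
};

/* Walk the circular list starting after 'cur'; returning to the
 * leader (tid == leader_tid) means the group is exhausted, like the
 * "next_tid == common->pid" check above.
 */
static struct task *next_task(struct task *cur, int leader_tid)
{
	struct task *t = cur->next;

	return t->tid == leader_tid ? NULL : t;
}

int main(void)
{
	struct task c = { .tid = 12 }, b = { .tid = 11, .next = &c },
		    a = { .tid = 10, .next = &b };	/* 10 is the leader */
	c.next = &a;	/* close the circle */

	for (struct task *t = &a; t; t = next_task(t, 10))
		printf("tid %d\n", t->tid);	/* 10, 11, 12 */
	return 0;
}
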
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 53ff50cac61e..e97aeda3a86b 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
goto out;
}
- /* clear all bits except SHARE_IPMODIFY */
- tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
+ /* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
+ tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c0c7d137066a..c8dae2fde115 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -304,7 +304,7 @@ struct bpf_kfunc_call_arg_meta {
/* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
* generally to pass info about user-defined local kptr types to later
* verification logic
- * bpf_obj_drop
+ * bpf_obj_drop/bpf_percpu_obj_drop
* Record the local kptr type to be drop'd
* bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type)
* Record the local kptr type to be refcount_incr'd and use
@@ -543,6 +543,7 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
}
static bool is_callback_calling_kfunc(u32 btf_id);
+static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
static bool is_callback_calling_function(enum bpf_func_id func_id)
{
@@ -1748,7 +1749,9 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
return -ENOMEM;
dst_state->jmp_history_cnt = src->jmp_history_cnt;
- /* if dst has more stack frames then src frame, free them */
+ /* if dst has more stack frames than src frame, free them; this is also
+ * necessary in case of exceptional exits using bpf_throw.
+ */
for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
free_func_state(dst_state->frame[i]);
dst_state->frame[i] = NULL;
@@ -2454,6 +2457,68 @@ static int add_subprog(struct bpf_verifier_env *env, int off)
return env->subprog_cnt - 1;
}
+static int bpf_find_exception_callback_insn_off(struct bpf_verifier_env *env)
+{
+ struct bpf_prog_aux *aux = env->prog->aux;
+ struct btf *btf = aux->btf;
+ const struct btf_type *t;
+ u32 main_btf_id, id;
+ const char *name;
+ int ret, i;
+
+ /* Non-zero func_info_cnt implies valid btf */
+ if (!aux->func_info_cnt)
+ return 0;
+ main_btf_id = aux->func_info[0].type_id;
+
+ t = btf_type_by_id(btf, main_btf_id);
+ if (!t) {
+ verbose(env, "invalid btf id for main subprog in func_info\n");
+ return -EINVAL;
+ }
+
+ name = btf_find_decl_tag_value(btf, t, -1, "exception_callback:");
+ if (IS_ERR(name)) {
+ ret = PTR_ERR(name);
+ /* If there is no tag present, there is no exception callback */
+ if (ret == -ENOENT)
+ ret = 0;
+ else if (ret == -EEXIST)
+ verbose(env, "multiple exception callback tags for main subprog\n");
+ return ret;
+ }
+
+ ret = btf_find_by_name_kind(btf, name, BTF_KIND_FUNC);
+ if (ret < 0) {
+ verbose(env, "exception callback '%s' could not be found in BTF\n", name);
+ return ret;
+ }
+ id = ret;
+ t = btf_type_by_id(btf, id);
+ if (btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
+ verbose(env, "exception callback '%s' must have global linkage\n", name);
+ return -EINVAL;
+ }
+ ret = 0;
+ for (i = 0; i < aux->func_info_cnt; i++) {
+ if (aux->func_info[i].type_id != id)
+ continue;
+ ret = aux->func_info[i].insn_off;
+ /* Further func_info and subprog checks will also happen
+ * later, so assume this is the right insn_off for now.
+ */
+ if (!ret) {
+ verbose(env, "invalid exception callback insn_off in func_info: 0\n");
+ ret = -EINVAL;
+ }
+ }
+ if (!ret) {
+ verbose(env, "exception callback type id not found in func_info\n");
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
#define MAX_KFUNC_DESCS 256
#define MAX_KFUNC_BTFS 256
@@ -2793,8 +2858,8 @@ bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
{
struct bpf_subprog_info *subprog = env->subprog_info;
+ int i, ret, insn_cnt = env->prog->len, ex_cb_insn;
struct bpf_insn *insn = env->prog->insnsi;
- int i, ret, insn_cnt = env->prog->len;
/* Add entry function. */
ret = add_subprog(env, 0);
@@ -2820,6 +2885,26 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
return ret;
}
+ ret = bpf_find_exception_callback_insn_off(env);
+ if (ret < 0)
+ return ret;
+ ex_cb_insn = ret;
+
+ /* If ex_cb_insn > 0, this means that the main program has a subprog
+ * marked using BTF decl tag to serve as the exception callback.
+ */
+ if (ex_cb_insn) {
+ ret = add_subprog(env, ex_cb_insn);
+ if (ret < 0)
+ return ret;
+ for (i = 1; i < env->subprog_cnt; i++) {
+ if (env->subprog_info[i].start != ex_cb_insn)
+ continue;
+ env->exception_callback_subprog = i;
+ break;
+ }
+ }
+
/* Add a fake 'exit' subprog which could simplify subprog iteration
* logic. 'subprog_cnt' should not be increased.
*/
@@ -2868,7 +2953,7 @@ next:
if (i == subprog_end - 1) {
/* to avoid fall-through from one subprog into another
* the last insn of the subprog should be either exit
- * or unconditional jump back
+ * or unconditional jump back or bpf_throw call
*/
if (code != (BPF_JMP | BPF_EXIT) &&
code != (BPF_JMP32 | BPF_JA) &&
@@ -4999,6 +5084,8 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
perm_flags |= PTR_UNTRUSTED;
} else {
perm_flags = PTR_MAYBE_NULL | MEM_ALLOC;
+ if (kptr_field->type == BPF_KPTR_PERCPU)
+ perm_flags |= MEM_PERCPU;
}
if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
@@ -5042,7 +5129,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
*/
if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
kptr_field->kptr.btf, kptr_field->kptr.btf_id,
- kptr_field->type == BPF_KPTR_REF))
+ kptr_field->type != BPF_KPTR_UNREF))
goto bad_type;
return 0;
bad_type:
@@ -5086,7 +5173,18 @@ static bool rcu_safe_kptr(const struct btf_field *field)
{
const struct btf_field_kptr *kptr = &field->kptr;
- return field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id);
+ return field->type == BPF_KPTR_PERCPU ||
+ (field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id));
+}
+
+static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr_field)
+{
+ if (rcu_safe_kptr(kptr_field) && in_rcu_cs(env)) {
+ if (kptr_field->type != BPF_KPTR_PERCPU)
+ return PTR_MAYBE_NULL | MEM_RCU;
+ return PTR_MAYBE_NULL | MEM_RCU | MEM_PERCPU;
+ }
+ return PTR_MAYBE_NULL | PTR_UNTRUSTED;
}
static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
@@ -5112,7 +5210,8 @@ static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
/* We only allow loading referenced kptr, since it will be marked as
* untrusted, similar to unreferenced kptr.
*/
- if (class != BPF_LDX && kptr_field->type == BPF_KPTR_REF) {
+ if (class != BPF_LDX &&
+ (kptr_field->type == BPF_KPTR_REF || kptr_field->type == BPF_KPTR_PERCPU)) {
verbose(env, "store to referenced kptr disallowed\n");
return -EACCES;
}
@@ -5123,10 +5222,7 @@ static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
* value from map as PTR_TO_BTF_ID, with the correct type.
*/
mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
- kptr_field->kptr.btf_id,
- rcu_safe_kptr(kptr_field) && in_rcu_cs(env) ?
- PTR_MAYBE_NULL | MEM_RCU :
- PTR_MAYBE_NULL | PTR_UNTRUSTED);
+ kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field));
/* For mark_ptr_or_null_reg */
val_reg->id = ++env->id_gen;
} else if (class == BPF_STX) {
@@ -5180,6 +5276,7 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
switch (field->type) {
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
if (src != ACCESS_DIRECT) {
verbose(env, "kptr cannot be accessed indirectly by helper\n");
return -EACCES;
@@ -5647,6 +5744,27 @@ continue_func:
for (; i < subprog_end; i++) {
int next_insn, sidx;
+ if (bpf_pseudo_kfunc_call(insn + i) && !insn[i].off) {
+ bool err = false;
+
+ if (!is_bpf_throw_kfunc(insn + i))
+ continue;
+ if (subprog[idx].is_cb)
+ err = true;
+ for (int c = 0; c < frame && !err; c++) {
+ if (subprog[ret_prog[c]].is_cb) {
+ err = true;
+ break;
+ }
+ }
+ if (!err)
+ continue;
+ verbose(env,
+ "bpf_throw kfunc (insn %d) cannot be called from callback subprog %d\n",
+ i, idx);
+ return -EINVAL;
+ }
+
if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
continue;
/* remember insn and function to return to */
@@ -5669,6 +5787,10 @@ continue_func:
/* async callbacks don't increase bpf prog stack size unless called directly */
if (!bpf_pseudo_call(insn + i))
continue;
+ if (subprog[sidx].is_exception_cb) {
+ verbose(env, "insn %d cannot call exception cb directly\n", i);
+ return -EINVAL;
+ }
}
i = next_insn;
idx = sidx;
@@ -5690,8 +5812,13 @@ continue_func:
* tail call counter throughout bpf2bpf calls combined with tailcalls
*/
if (tail_call_reachable)
- for (j = 0; j < frame; j++)
+ for (j = 0; j < frame; j++) {
+ if (subprog[ret_prog[j]].is_exception_cb) {
+ verbose(env, "cannot tail call within exception cb\n");
+ return -EINVAL;
+ }
subprog[ret_prog[j]].tail_call_reachable = true;
+ }
if (subprog[0].tail_call_reachable)
env->prog->aux->tail_call_reachable = true;
@@ -6207,7 +6334,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
}
if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) &&
- !reg->ref_obj_id) {
+ !(reg->type & MEM_RCU) && !reg->ref_obj_id) {
verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
return -EFAULT;
}
@@ -7318,7 +7445,7 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
return -EACCES;
}
- if (kptr_field->type != BPF_KPTR_REF) {
+ if (kptr_field->type != BPF_KPTR_REF && kptr_field->type != BPF_KPTR_PERCPU) {
verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
return -EACCES;
}
@@ -7751,6 +7878,7 @@ static const struct bpf_reg_types btf_ptr_types = {
static const struct bpf_reg_types percpu_btf_ptr_types = {
.types = {
PTR_TO_BTF_ID | MEM_PERCPU,
+ PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU,
PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED,
}
};
@@ -7829,8 +7957,10 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
if (base_type(arg_type) == ARG_PTR_TO_MEM)
type &= ~DYNPTR_TYPE_FLAG_MASK;
- if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type))
+ if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type)) {
type &= ~MEM_ALLOC;
+ type &= ~MEM_PERCPU;
+ }
for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
expected = compatible->types[i];
@@ -7913,6 +8043,7 @@ found:
break;
}
case PTR_TO_BTF_ID | MEM_ALLOC:
+ case PTR_TO_BTF_ID | MEM_PERCPU | MEM_ALLOC:
if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock &&
meta->func_id != BPF_FUNC_kptr_xchg) {
verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
@@ -7924,6 +8055,7 @@ found:
}
break;
case PTR_TO_BTF_ID | MEM_PERCPU:
+ case PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU:
case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED:
/* Handled by helper specific checks */
break;
@@ -8900,6 +9032,7 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
* callbacks
*/
if (set_callee_state_cb != set_callee_state) {
+ env->subprog_info[subprog].is_cb = true;
if (bpf_pseudo_kfunc_call(insn) &&
!is_callback_calling_kfunc(insn->imm)) {
verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
@@ -9289,7 +9422,8 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
verbose(env, "to caller at %d:\n", *insn_idx);
print_verifier_state(env, caller, true);
}
- /* clear everything in the callee */
+ /* clear everything in the callee. In case of exceptional exits using
+ * bpf_throw, this will be done by copy_verifier_state for extra frames. */
free_func_state(callee);
state->frame[state->curframe--] = NULL;
return 0;
@@ -9413,17 +9547,17 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
return 0;
}
-static int check_reference_leak(struct bpf_verifier_env *env)
+static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit)
{
struct bpf_func_state *state = cur_func(env);
bool refs_lingering = false;
int i;
- if (state->frameno && !state->in_callback_fn)
+ if (!exception_exit && state->frameno && !state->in_callback_fn)
return 0;
for (i = 0; i < state->acquired_refs; i++) {
- if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
+ if (!exception_exit && state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
continue;
verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
state->refs[i].id, state->refs[i].insn_idx);
@@ -9530,6 +9664,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
int *insn_idx_p)
{
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
+ bool returns_cpu_specific_alloc_ptr = false;
const struct bpf_func_proto *fn = NULL;
enum bpf_return_type ret_type;
enum bpf_type_flag ret_flag;
@@ -9640,6 +9775,26 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EFAULT;
}
err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
+ } else if (func_id == BPF_FUNC_kptr_xchg && meta.ref_obj_id) {
+ u32 ref_obj_id = meta.ref_obj_id;
+ bool in_rcu = in_rcu_cs(env);
+ struct bpf_func_state *state;
+ struct bpf_reg_state *reg;
+
+ err = release_reference_state(cur_func(env), ref_obj_id);
+ if (!err) {
+ bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
+ if (reg->ref_obj_id == ref_obj_id) {
+ if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) {
+ reg->ref_obj_id = 0;
+ reg->type &= ~MEM_ALLOC;
+ reg->type |= MEM_RCU;
+ } else {
+ mark_reg_invalid(env, reg);
+ }
+ }
+ }));
+ }
} else if (meta.ref_obj_id) {
err = release_reference(env, meta.ref_obj_id);
} else if (register_is_null(&regs[meta.release_regno])) {
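A minimal BPF-side sketch of the flow verified above, assuming the bpf_percpu_obj_new()/bpf_percpu_obj_drop()/bpf_kptr_xchg() wrappers and the __percpu_kptr tag from the selftests' bpf_experimental.h (illustrative, not part of this patch). Once the xchg releases the reference on 'p', the surviving copies of 'p' are downgraded to MEM_RCU instead of being invalidated, so the later bpf_this_cpu_ptr() is still accepted inside an RCU section:

struct val {
	long cnt;
};

struct elem {
	struct val __percpu_kptr *pc;
};

/* 'e' is assumed to be a map value of type struct elem */
static int stash_and_touch(struct elem *e)
{
	struct val *p, *old, *v;

	p = bpf_percpu_obj_new(struct val);	/* scalars-only struct */
	if (!p)
		return 0;
	old = bpf_kptr_xchg(&e->pc, p);		/* releases the ref on p */
	if (old) {
		bpf_percpu_obj_drop(old);
		return 0;
	}
	v = bpf_this_cpu_ptr(p);		/* p is MEM_RCU here */
	v->cnt++;
	return 0;
}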
@@ -9657,7 +9812,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
switch (func_id) {
case BPF_FUNC_tail_call:
- err = check_reference_leak(env);
+ err = check_reference_leak(env, false);
if (err) {
verbose(env, "tail_call would lead to reference leak\n");
return err;
@@ -9768,6 +9923,23 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
break;
}
+ case BPF_FUNC_per_cpu_ptr:
+ case BPF_FUNC_this_cpu_ptr:
+ {
+ struct bpf_reg_state *reg = &regs[BPF_REG_1];
+ const struct btf_type *type;
+
+ if (reg->type & MEM_RCU) {
+ type = btf_type_by_id(reg->btf, reg->btf_id);
+ if (!type || !btf_type_is_struct(type)) {
+ verbose(env, "Helper has invalid btf/btf_id in R1\n");
+ return -EFAULT;
+ }
+ returns_cpu_specific_alloc_ptr = true;
+ env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true;
+ }
+ break;
+ }
case BPF_FUNC_user_ringbuf_drain:
err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
set_user_ringbuf_callback_state);
@@ -9857,14 +10029,18 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
regs[BPF_REG_0].mem_size = tsize;
} else {
- /* MEM_RDONLY may be carried from ret_flag, but it
- * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
- * it will confuse the check of PTR_TO_BTF_ID in
- * check_mem_access().
- */
- ret_flag &= ~MEM_RDONLY;
+ if (returns_cpu_specific_alloc_ptr) {
+ regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC | MEM_RCU;
+ } else {
+ /* MEM_RDONLY may be carried from ret_flag, but it
+ * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
+ * it will confuse the check of PTR_TO_BTF_ID in
+ * check_mem_access().
+ */
+ ret_flag &= ~MEM_RDONLY;
+ regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
+ }
- regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
regs[BPF_REG_0].btf = meta.ret_btf;
regs[BPF_REG_0].btf_id = meta.ret_btf_id;
}
@@ -9880,8 +10056,11 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
if (func_id == BPF_FUNC_kptr_xchg) {
ret_btf = meta.kptr_field->kptr.btf;
ret_btf_id = meta.kptr_field->kptr.btf_id;
- if (!btf_is_kernel(ret_btf))
+ if (!btf_is_kernel(ret_btf)) {
regs[BPF_REG_0].type |= MEM_ALLOC;
+ if (meta.kptr_field->type == BPF_KPTR_PERCPU)
+ regs[BPF_REG_0].type |= MEM_PERCPU;
+ }
} else {
if (fn->ret_btf_id == BPF_PTR_POISON) {
verbose(env, "verifier internal error:");
@@ -10266,6 +10445,9 @@ enum special_kfunc_type {
KF_bpf_dynptr_slice,
KF_bpf_dynptr_slice_rdwr,
KF_bpf_dynptr_clone,
+ KF_bpf_percpu_obj_new_impl,
+ KF_bpf_percpu_obj_drop_impl,
+ KF_bpf_throw,
};
BTF_SET_START(special_kfunc_set)
@@ -10286,6 +10468,9 @@ BTF_ID(func, bpf_dynptr_from_xdp)
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)
BTF_ID(func, bpf_dynptr_clone)
+BTF_ID(func, bpf_percpu_obj_new_impl)
+BTF_ID(func, bpf_percpu_obj_drop_impl)
+BTF_ID(func, bpf_throw)
BTF_SET_END(special_kfunc_set)
BTF_ID_LIST(special_kfunc_list)
@@ -10308,6 +10493,9 @@ BTF_ID(func, bpf_dynptr_from_xdp)
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)
BTF_ID(func, bpf_dynptr_clone)
+BTF_ID(func, bpf_percpu_obj_new_impl)
+BTF_ID(func, bpf_percpu_obj_drop_impl)
+BTF_ID(func, bpf_throw)
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
{
@@ -10625,6 +10813,12 @@ static bool is_callback_calling_kfunc(u32 btf_id)
return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
}
+static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
+{
+ return bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
+ insn->imm == special_kfunc_list[KF_bpf_throw];
+}
+
static bool is_rbtree_lock_required_kfunc(u32 btf_id)
{
return is_bpf_rbtree_api_kfunc(btf_id);
@@ -11002,7 +11196,17 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
break;
case KF_ARG_PTR_TO_ALLOC_BTF_ID:
- if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
+ if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) {
+ if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) {
+ verbose(env, "arg#%d expected for bpf_obj_drop_impl()\n", i);
+ return -EINVAL;
+ }
+ } else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) {
+ if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) {
+ verbose(env, "arg#%d expected for bpf_percpu_obj_drop_impl()\n", i);
+ return -EINVAL;
+ }
+ } else {
verbose(env, "arg#%d expected pointer to allocated object\n", i);
return -EINVAL;
}
@@ -11010,8 +11214,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
verbose(env, "allocated object must be referenced\n");
return -EINVAL;
}
- if (meta->btf == btf_vmlinux &&
- meta->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
+ if (meta->btf == btf_vmlinux) {
meta->arg_btf = reg->btf;
meta->arg_btf_id = reg->btf_id;
}
@@ -11202,6 +11405,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
break;
}
case KF_ARG_PTR_TO_CALLBACK:
+ if (reg->type != PTR_TO_FUNC) {
+ verbose(env, "arg%d expected pointer to func\n", i);
+ return -EINVAL;
+ }
meta->subprogno = reg->subprogno;
break;
case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
@@ -11280,6 +11487,8 @@ static int fetch_kfunc_meta(struct bpf_verifier_env *env,
return 0;
}
+static int check_return_code(struct bpf_verifier_env *env, int regno);
+
static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
{
@@ -11401,6 +11610,24 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
}
}
+ if (meta.func_id == special_kfunc_list[KF_bpf_throw]) {
+ if (!bpf_jit_supports_exceptions()) {
+ verbose(env, "JIT does not support calling kfunc %s#%d\n",
+ func_name, meta.func_id);
+ return -ENOTSUPP;
+ }
+ env->seen_exception = true;
+
+ /* In the case of the default callback, the cookie value passed
+ * to bpf_throw becomes the return value of the program.
+ */
+ if (!env->exception_callback_subprog) {
+ err = check_return_code(env, BPF_REG_1);
+ if (err < 0)
+ return err;
+ }
+ }
+
for (i = 0; i < CALLER_SAVED_REGS; i++)
mark_reg_not_init(env, regs, caller_saved[i]);
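For reference, a minimal sketch of a program hitting this path, assuming the bpf_throw() declaration from the selftests' bpf_experimental.h. With no custom exception callback in place, the cookie passed in R1 becomes the program's return value, which is why check_return_code() is invoked on BPF_REG_1 above:

extern void bpf_throw(u64 cookie) __ksym;

SEC("tc")
int throw_on_jumbo(struct __sk_buff *ctx)
{
	if (ctx->len > 1500)
		bpf_throw(2);	/* unwinds all frames; prog "returns" 2 */
	return 1;
}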
@@ -11411,6 +11638,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
/* Only exception is bpf_obj_new_impl */
if (meta.btf != btf_vmlinux ||
(meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] &&
+ meta.func_id != special_kfunc_list[KF_bpf_percpu_obj_new_impl] &&
meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) {
verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
return -EINVAL;
@@ -11424,11 +11652,16 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id);
if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
- if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
+ if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
+ meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
+ struct btf_struct_meta *struct_meta;
struct btf *ret_btf;
u32 ret_btf_id;
- if (unlikely(!bpf_global_ma_set))
+ if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set)
+ return -ENOMEM;
+
+ if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && !bpf_global_percpu_ma_set)
return -ENOMEM;
if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
@@ -11441,24 +11674,38 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
/* This may be NULL due to user not supplying a BTF */
if (!ret_btf) {
- verbose(env, "bpf_obj_new requires prog BTF\n");
+ verbose(env, "bpf_obj_new/bpf_percpu_obj_new requires prog BTF\n");
return -EINVAL;
}
ret_t = btf_type_by_id(ret_btf, ret_btf_id);
if (!ret_t || !__btf_type_is_struct(ret_t)) {
- verbose(env, "bpf_obj_new type ID argument must be of a struct\n");
+ verbose(env, "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct\n");
return -EINVAL;
}
+ struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id);
+ if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
+ if (!__btf_type_is_scalar_struct(env, ret_btf, ret_t, 0)) {
+ verbose(env, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n");
+ return -EINVAL;
+ }
+
+ if (struct_meta) {
+ verbose(env, "bpf_percpu_obj_new type ID argument must not contain special fields\n");
+ return -EINVAL;
+ }
+ }
+
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
regs[BPF_REG_0].btf = ret_btf;
regs[BPF_REG_0].btf_id = ret_btf_id;
+ if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl])
+ regs[BPF_REG_0].type |= MEM_PERCPU;
insn_aux->obj_new_size = ret_t->size;
- insn_aux->kptr_struct_meta =
- btf_find_struct_meta(ret_btf, ret_btf_id);
+ insn_aux->kptr_struct_meta = struct_meta;
} else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
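Illustrative only: together, the scalar-struct and struct_meta checks above mean a plain-scalar type is accepted for bpf_percpu_obj_new() while anything carrying special BTF fields is rejected, e.g.:

struct ok {			/* accepted: scalars only */
	long a;
	int b;
};

struct bad {			/* rejected: special field present */
	struct bpf_spin_lock lock;
	long a;
};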
@@ -11595,7 +11842,8 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
regs[BPF_REG_0].id = ++env->id_gen;
} else if (btf_type_is_void(t)) {
if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
- if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
+ if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
+ meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) {
insn_aux->kptr_struct_meta =
btf_find_struct_meta(meta.arg_btf,
meta.arg_btf_id);
@@ -14425,7 +14673,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
* gen_ld_abs() may terminate the program at runtime, leading to
* reference leak.
*/
- err = check_reference_leak(env);
+ err = check_reference_leak(env, false);
if (err) {
verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
return err;
@@ -14474,7 +14722,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
-static int check_return_code(struct bpf_verifier_env *env)
+static int check_return_code(struct bpf_verifier_env *env, int regno)
{
struct tnum enforce_attach_type_range = tnum_unknown;
const struct bpf_prog *prog = env->prog;
@@ -14486,7 +14734,7 @@ static int check_return_code(struct bpf_verifier_env *env)
const bool is_subprog = frame->subprogno;
/* LSM and struct_ops func-ptr's return type could be "void" */
- if (!is_subprog) {
+ if (!is_subprog || frame->in_exception_callback_fn) {
switch (prog_type) {
case BPF_PROG_TYPE_LSM:
if (prog->expected_attach_type == BPF_LSM_CGROUP)
@@ -14508,22 +14756,22 @@ static int check_return_code(struct bpf_verifier_env *env)
* of bpf_exit, which means that program wrote
* something into it earlier
*/
- err = check_reg_arg(env, BPF_REG_0, SRC_OP);
+ err = check_reg_arg(env, regno, SRC_OP);
if (err)
return err;
- if (is_pointer_value(env, BPF_REG_0)) {
- verbose(env, "R0 leaks addr as return value\n");
+ if (is_pointer_value(env, regno)) {
+ verbose(env, "R%d leaks addr as return value\n", regno);
return -EACCES;
}
- reg = cur_regs(env) + BPF_REG_0;
+ reg = cur_regs(env) + regno;
if (frame->in_async_callback_fn) {
/* enforce return zero from async callbacks like timer */
if (reg->type != SCALAR_VALUE) {
- verbose(env, "In async callback the register R0 is not a known value (%s)\n",
- reg_type_str(env, reg->type));
+ verbose(env, "In async callback the register R%d is not a known value (%s)\n",
+ regno, reg_type_str(env, reg->type));
return -EINVAL;
}
@@ -14534,10 +14782,10 @@ static int check_return_code(struct bpf_verifier_env *env)
return 0;
}
- if (is_subprog) {
+ if (is_subprog && !frame->in_exception_callback_fn) {
if (reg->type != SCALAR_VALUE) {
- verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
- reg_type_str(env, reg->type));
+ verbose(env, "At subprogram exit the register R%d is not a scalar value (%s)\n",
+ regno, reg_type_str(env, reg->type));
return -EINVAL;
}
return 0;
@@ -14619,8 +14867,8 @@ static int check_return_code(struct bpf_verifier_env *env)
}
if (reg->type != SCALAR_VALUE) {
- verbose(env, "At program exit the register R0 is not a known value (%s)\n",
- reg_type_str(env, reg->type));
+ verbose(env, "At program exit the register R%d is not a known value (%s)\n",
+ regno, reg_type_str(env, reg->type));
return -EINVAL;
}
@@ -14891,8 +15139,8 @@ static int check_cfg(struct bpf_verifier_env *env)
{
int insn_cnt = env->prog->len;
int *insn_stack, *insn_state;
- int ret = 0;
- int i;
+ int ex_insn_beg, i, ret = 0;
+ bool ex_done = false;
insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
@@ -14908,6 +15156,7 @@ static int check_cfg(struct bpf_verifier_env *env)
insn_stack[0] = 0; /* 0 is the first instruction */
env->cfg.cur_stack = 1;
+walk_cfg:
while (env->cfg.cur_stack > 0) {
int t = insn_stack[env->cfg.cur_stack - 1];
@@ -14934,6 +15183,16 @@ static int check_cfg(struct bpf_verifier_env *env)
goto err_free;
}
+ if (env->exception_callback_subprog && !ex_done) {
+ ex_insn_beg = env->subprog_info[env->exception_callback_subprog].start;
+
+ insn_state[ex_insn_beg] = DISCOVERED;
+ insn_stack[0] = ex_insn_beg;
+ env->cfg.cur_stack = 1;
+ ex_done = true;
+ goto walk_cfg;
+ }
+
for (i = 0; i < insn_cnt; i++) {
if (insn_state[i] != EXPLORED) {
verbose(env, "unreachable insn %d\n", i);
@@ -14971,20 +15230,18 @@ static int check_abnormal_return(struct bpf_verifier_env *env)
#define MIN_BPF_FUNCINFO_SIZE 8
#define MAX_FUNCINFO_REC_SIZE 252
-static int check_btf_func(struct bpf_verifier_env *env,
- const union bpf_attr *attr,
- bpfptr_t uattr)
+static int check_btf_func_early(struct bpf_verifier_env *env,
+ const union bpf_attr *attr,
+ bpfptr_t uattr)
{
- const struct btf_type *type, *func_proto, *ret_type;
- u32 i, nfuncs, urec_size, min_size;
u32 krec_size = sizeof(struct bpf_func_info);
+ const struct btf_type *type, *func_proto;
+ u32 i, nfuncs, urec_size, min_size;
struct bpf_func_info *krecord;
- struct bpf_func_info_aux *info_aux = NULL;
struct bpf_prog *prog;
const struct btf *btf;
- bpfptr_t urecord;
u32 prev_offset = 0;
- bool scalar_return;
+ bpfptr_t urecord;
int ret = -ENOMEM;
nfuncs = attr->func_info_cnt;
@@ -14994,11 +15251,6 @@ static int check_btf_func(struct bpf_verifier_env *env,
return 0;
}
- if (nfuncs != env->subprog_cnt) {
- verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
- return -EINVAL;
- }
-
urec_size = attr->func_info_rec_size;
if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
urec_size > MAX_FUNCINFO_REC_SIZE ||
@@ -15016,9 +15268,6 @@ static int check_btf_func(struct bpf_verifier_env *env,
krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
if (!krecord)
return -ENOMEM;
- info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
- if (!info_aux)
- goto err_free;
for (i = 0; i < nfuncs; i++) {
ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
@@ -15057,11 +15306,6 @@ static int check_btf_func(struct bpf_verifier_env *env,
goto err_free;
}
- if (env->subprog_info[i].start != krecord[i].insn_off) {
- verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
- goto err_free;
- }
-
/* check type_id */
type = btf_type_by_id(btf, krecord[i].type_id);
if (!type || !btf_type_is_func(type)) {
@@ -15069,12 +15313,77 @@ static int check_btf_func(struct bpf_verifier_env *env,
krecord[i].type_id);
goto err_free;
}
- info_aux[i].linkage = BTF_INFO_VLEN(type->info);
func_proto = btf_type_by_id(btf, type->type);
if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
/* btf_func_check() already verified it during BTF load */
goto err_free;
+
+ prev_offset = krecord[i].insn_off;
+ bpfptr_add(&urecord, urec_size);
+ }
+
+ prog->aux->func_info = krecord;
+ prog->aux->func_info_cnt = nfuncs;
+ return 0;
+
+err_free:
+ kvfree(krecord);
+ return ret;
+}
+
+static int check_btf_func(struct bpf_verifier_env *env,
+ const union bpf_attr *attr,
+ bpfptr_t uattr)
+{
+ const struct btf_type *type, *func_proto, *ret_type;
+ u32 i, nfuncs, urec_size;
+ struct bpf_func_info *krecord;
+ struct bpf_func_info_aux *info_aux = NULL;
+ struct bpf_prog *prog;
+ const struct btf *btf;
+ bpfptr_t urecord;
+ bool scalar_return;
+ int ret = -ENOMEM;
+
+ nfuncs = attr->func_info_cnt;
+ if (!nfuncs) {
+ if (check_abnormal_return(env))
+ return -EINVAL;
+ return 0;
+ }
+ if (nfuncs != env->subprog_cnt) {
+ verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
+ return -EINVAL;
+ }
+
+ urec_size = attr->func_info_rec_size;
+
+ prog = env->prog;
+ btf = prog->aux->btf;
+
+ urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
+
+ krecord = prog->aux->func_info;
+ info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
+ if (!info_aux)
+ return -ENOMEM;
+
+ for (i = 0; i < nfuncs; i++) {
+ /* check insn_off */
+ ret = -EINVAL;
+
+ if (env->subprog_info[i].start != krecord[i].insn_off) {
+ verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
+ goto err_free;
+ }
+
+ /* Already checked type_id */
+ type = btf_type_by_id(btf, krecord[i].type_id);
+ info_aux[i].linkage = BTF_INFO_VLEN(type->info);
+ /* Already checked func_proto */
+ func_proto = btf_type_by_id(btf, type->type);
+
ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
scalar_return =
btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
@@ -15087,17 +15396,13 @@ static int check_btf_func(struct bpf_verifier_env *env,
goto err_free;
}
- prev_offset = krecord[i].insn_off;
bpfptr_add(&urecord, urec_size);
}
- prog->aux->func_info = krecord;
- prog->aux->func_info_cnt = nfuncs;
prog->aux->func_info_aux = info_aux;
return 0;
err_free:
- kvfree(krecord);
kfree(info_aux);
return ret;
}
@@ -15110,7 +15415,8 @@ static void adjust_btf_func(struct bpf_verifier_env *env)
if (!aux->func_info)
return;
- for (i = 0; i < env->subprog_cnt; i++)
+ /* func_info is not available for hidden subprogs */
+ for (i = 0; i < env->subprog_cnt - env->hidden_subprog_cnt; i++)
aux->func_info[i].insn_off = env->subprog_info[i].start;
}
@@ -15314,9 +15620,9 @@ static int check_core_relo(struct bpf_verifier_env *env,
return err;
}
-static int check_btf_info(struct bpf_verifier_env *env,
- const union bpf_attr *attr,
- bpfptr_t uattr)
+static int check_btf_info_early(struct bpf_verifier_env *env,
+ const union bpf_attr *attr,
+ bpfptr_t uattr)
{
struct btf *btf;
int err;
@@ -15336,6 +15642,24 @@ static int check_btf_info(struct bpf_verifier_env *env,
}
env->prog->aux->btf = btf;
+ err = check_btf_func_early(env, attr, uattr);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int check_btf_info(struct bpf_verifier_env *env,
+ const union bpf_attr *attr,
+ bpfptr_t uattr)
+{
+ int err;
+
+ if (!attr->func_info_cnt && !attr->line_info_cnt) {
+ if (check_abnormal_return(env))
+ return -EINVAL;
+ return 0;
+ }
+
err = check_btf_func(env, attr, uattr);
if (err)
return err;
@@ -16438,6 +16762,7 @@ static int do_check(struct bpf_verifier_env *env)
int prev_insn_idx = -1;
for (;;) {
+ bool exception_exit = false;
struct bpf_insn *insn;
u8 class;
int err;
@@ -16652,12 +16977,17 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
}
- if (insn->src_reg == BPF_PSEUDO_CALL)
+ if (insn->src_reg == BPF_PSEUDO_CALL) {
err = check_func_call(env, insn, &env->insn_idx);
- else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
+ } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
err = check_kfunc_call(env, insn, &env->insn_idx);
- else
+ if (!err && is_bpf_throw_kfunc(insn)) {
+ exception_exit = true;
+ goto process_bpf_exit_full;
+ }
+ } else {
err = check_helper_call(env, insn, &env->insn_idx);
+ }
if (err)
return err;
@@ -16687,7 +17017,7 @@ static int do_check(struct bpf_verifier_env *env)
verbose(env, "BPF_EXIT uses reserved fields\n");
return -EINVAL;
}
-
+process_bpf_exit_full:
if (env->cur_state->active_lock.ptr &&
!in_rbtree_lock_required_cb(env)) {
verbose(env, "bpf_spin_unlock is missing\n");
@@ -16706,10 +17036,23 @@ static int do_check(struct bpf_verifier_env *env)
* function, for which reference_state must
* match caller reference state when it exits.
*/
- err = check_reference_leak(env);
+ err = check_reference_leak(env, exception_exit);
if (err)
return err;
+ /* The side effect of the prepare_func_exit
+ * which is being skipped is that it frees
+ * bpf_func_state. Typically, process_bpf_exit
+ * will only be hit with the outermost exit.
+ * copy_verifier_state in pop_stack will handle
+ * freeing of any extra bpf_func_state left over
+ * from not processing all nested function
+ * exits. We also skip return code checks as
+ * they are not needed for exceptional exits.
+ */
+ if (exception_exit)
+ goto process_bpf_exit;
+
if (state->curframe) {
/* exit from nested function */
err = prepare_func_exit(env, &env->insn_idx);
@@ -16719,7 +17062,7 @@ static int do_check(struct bpf_verifier_env *env)
continue;
}
- err = check_return_code(env);
+ err = check_return_code(env, BPF_REG_0);
if (err)
return err;
process_bpf_exit:
@@ -18012,6 +18355,9 @@ static int jit_subprogs(struct bpf_verifier_env *env)
}
func[i]->aux->num_exentries = num_exentries;
func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
+ func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb;
+ if (!i)
+ func[i]->aux->exception_boundary = env->seen_exception;
func[i] = bpf_int_jit_compile(func[i]);
if (!func[i]->jited) {
err = -ENOTSUPP;
@@ -18051,7 +18397,8 @@ static int jit_subprogs(struct bpf_verifier_env *env)
* the call instruction, as an index for this list
*/
func[i]->aux->func = func;
- func[i]->aux->func_cnt = env->subprog_cnt;
+ func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt;
+ func[i]->aux->real_func_cnt = env->subprog_cnt;
}
for (i = 0; i < env->subprog_cnt; i++) {
old_bpf_func = func[i]->bpf_func;
@@ -18097,7 +18444,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
prog->aux->extable = func[0]->aux->extable;
prog->aux->num_exentries = func[0]->aux->num_exentries;
prog->aux->func = func;
- prog->aux->func_cnt = env->subprog_cnt;
+ prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt;
+ prog->aux->real_func_cnt = env->subprog_cnt;
+ prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func;
+ prog->aux->exception_boundary = func[0]->aux->exception_boundary;
bpf_prog_jit_attempt_done(prog);
return 0;
out_free:
@@ -18264,21 +18614,35 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
insn->imm = BPF_CALL_IMM(desc->addr);
if (insn->off)
return 0;
- if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
+ if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
+ desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size;
+ if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) {
+ verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n",
+ insn_idx);
+ return -EFAULT;
+ }
+
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size);
insn_buf[1] = addr[0];
insn_buf[2] = addr[1];
insn_buf[3] = *insn;
*cnt = 4;
} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
+ desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] ||
desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
+ if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) {
+ verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n",
+ insn_idx);
+ return -EFAULT;
+ }
+
if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
!kptr_struct_meta) {
verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",
@@ -18319,6 +18683,33 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return 0;
}
+/* The function requires that the first instruction in 'patch' is insnsi[prog->len - 1] */
+static int add_hidden_subprog(struct bpf_verifier_env *env, struct bpf_insn *patch, int len)
+{
+ struct bpf_subprog_info *info = env->subprog_info;
+ int cnt = env->subprog_cnt;
+ struct bpf_prog *prog;
+
+ /* We only reserve one slot for hidden subprogs in subprog_info. */
+ if (env->hidden_subprog_cnt) {
+ verbose(env, "verifier internal error: only one hidden subprog supported\n");
+ return -EFAULT;
+ }
+ /* We're not patching any existing instruction, just appending the new
+ * ones for the hidden subprog. Hence all of the adjustment operations
+ * in bpf_patch_insn_data are no-ops.
+ */
+ prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len);
+ if (!prog)
+ return -ENOMEM;
+ env->prog = prog;
+ info[cnt + 1].start = info[cnt].start;
+ info[cnt].start = prog->len - len + 1;
+ env->subprog_cnt++;
+ env->hidden_subprog_cnt++;
+ return 0;
+}
+
/* Do various post-verification rewrites in a single program pass.
* These rewrites simplify JIT and interpreter implementations.
*/
@@ -18337,6 +18728,26 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
struct bpf_map *map_ptr;
int i, ret, cnt, delta = 0;
+ if (env->seen_exception && !env->exception_callback_subprog) {
+ struct bpf_insn patch[] = {
+ env->prog->insnsi[insn_cnt - 1],
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ };
+
+ ret = add_hidden_subprog(env, patch, ARRAY_SIZE(patch));
+ if (ret < 0)
+ return ret;
+ prog = env->prog;
+ insn = prog->insnsi;
+
+ env->exception_callback_subprog = env->subprog_cnt - 1;
+ /* Don't update insn_cnt, as add_hidden_subprog always appends insns */
+ env->subprog_info[env->exception_callback_subprog].is_cb = true;
+ env->subprog_info[env->exception_callback_subprog].is_async_cb = true;
+ env->subprog_info[env->exception_callback_subprog].is_exception_cb = true;
+ }
+
for (i = 0; i < insn_cnt; i++, insn++) {
/* Make divide-by-zero exceptions impossible. */
if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
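In C terms, the three-instruction patch above appends the equivalent of this default callback (a sketch, not actual source; the first patch slot merely re-emits the program's former last instruction, as add_hidden_subprog() requires):

static __u64 default_exception_cb(__u64 cookie)
{
	return cookie;	/* BPF_MOV64_REG(R0, R1); BPF_EXIT_INSN() */
}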
@@ -18606,6 +19017,25 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
goto patch_call_imm;
}
+ /* bpf_per_cpu_ptr() and bpf_this_cpu_ptr() */
+ if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) {
+ /* patch with 'r1 = *(u64 *)(r1 + 0)' since for percpu data,
+ * bpf_mem_alloc() returns a ptr to the percpu data ptr.
+ */
+ insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
+ insn_buf[1] = *insn;
+ cnt = 2;
+
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ goto patch_call_imm;
+ }
+
/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
* and other inlining handlers are currently limited to 64 bit
* only.
@@ -19015,7 +19445,7 @@ static void free_states(struct bpf_verifier_env *env)
}
}
-static int do_check_common(struct bpf_verifier_env *env, int subprog)
+static int do_check_common(struct bpf_verifier_env *env, int subprog, bool is_ex_cb)
{
bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
struct bpf_verifier_state *state;
@@ -19046,7 +19476,7 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
regs = state->frame[state->curframe]->regs;
if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
- ret = btf_prepare_func_args(env, subprog, regs);
+ ret = btf_prepare_func_args(env, subprog, regs, is_ex_cb);
if (ret)
goto out;
for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
@@ -19062,6 +19492,12 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
regs[i].id = ++env->id_gen;
}
}
+ if (is_ex_cb) {
+ state->frame[0]->in_exception_callback_fn = true;
+ env->subprog_info[subprog].is_cb = true;
+ env->subprog_info[subprog].is_async_cb = true;
+ env->subprog_info[subprog].is_exception_cb = true;
+ }
} else {
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
@@ -19126,7 +19562,7 @@ static int do_check_subprogs(struct bpf_verifier_env *env)
continue;
env->insn_idx = env->subprog_info[i].start;
WARN_ON_ONCE(env->insn_idx == 0);
- ret = do_check_common(env, i);
+ ret = do_check_common(env, i, env->exception_callback_subprog == i);
if (ret) {
return ret;
} else if (env->log.level & BPF_LOG_LEVEL) {
@@ -19143,7 +19579,7 @@ static int do_check_main(struct bpf_verifier_env *env)
int ret;
env->insn_idx = 0;
- ret = do_check_common(env, 0);
+ ret = do_check_common(env, 0, false);
if (!ret)
env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
return ret;
@@ -19312,6 +19748,12 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
bpf_log(log, "Subprog %s doesn't exist\n", tname);
return -EINVAL;
}
+ if (aux->func && aux->func[subprog]->aux->exception_cb) {
+ bpf_log(log,
+ "%s programs cannot attach to exception callback\n",
+ prog_extension ? "Extension" : "FENTRY/FEXIT");
+ return -EINVAL;
+ }
conservative = aux->func_info_aux[subprog].unreliable;
if (prog_extension) {
if (conservative) {
@@ -19641,6 +20083,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
if (!tr)
return -ENOMEM;
+ if (tgt_prog && tgt_prog->aux->tail_call_reachable)
+ tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
+
prog->aux->dst_trampoline = tr;
return 0;
}
@@ -19736,6 +20181,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (!env->explored_states)
goto skip_full_check;
+ ret = check_btf_info_early(env, attr, uattr);
+ if (ret < 0)
+ goto skip_full_check;
+
ret = add_subprog_and_kfunc(env);
if (ret < 0)
goto skip_full_check;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index ecde4216201e..7916503e6a6a 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5111,6 +5111,104 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0xffffffff } }
},
+ /* MOVSX32 */
+ {
+ "ALU_MOVSX | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x00000000ffffffefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX32_REG(R1, R3, 8),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_MOVSX | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x00000000ffffbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX32_REG(R1, R3, 16),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_MOVSX | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x00000000deadbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX32_REG(R1, R3, 32),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* MOVSX64 REG */
+ {
+ "ALU64_MOVSX | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX64_REG(R1, R3, 8),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MOVSX | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX64_REG(R1, R3, 16),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MOVSX | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffdeadbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX64_REG(R1, R3, 32),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
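A reference model for the MOVSX semantics exercised above (a sketch, not kernel code): sign-extend the low 8/16/32 bits of the source register; the ALU32 variants then zero the upper 32 bits of the destination, which is what expected values like 0x00000000ffffffef reflect.

static __u64 movsx64(__u64 src, int bits)
{
	int shift = 64 - bits;

	return (__u64)(((__s64)(src << shift)) >> shift);
}

static __u64 movsx32(__u64 src, int bits)
{
	return (__u32)movsx64(src, bits);	/* upper 32 bits cleared */
}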
/* BPF_ALU | BPF_ADD | BPF_X */
{
"ALU_ADD_X: 1 + 2 = 3",
@@ -6105,6 +6203,106 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 2 } },
},
+ /* BPF_ALU | BPF_DIV | BPF_X off=1 (SDIV) */
+ {
+ "ALU_SDIV_X: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG_OFF(BPF_DIV, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU | BPF_DIV | BPF_K off=1 (SDIV) */
+ {
+ "ALU_SDIV_K: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU32_IMM_OFF(BPF_DIV, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU64 | BPF_DIV | BPF_X off=1 (SDIV64) */
+ {
+ "ALU64_SDIV_X: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG_OFF(BPF_DIV, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU64 | BPF_DIV | BPF_K off=1 (SDIV64) */
+ {
+ "ALU64_SDIV_K: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU64_IMM_OFF(BPF_DIV, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU | BPF_MOD | BPF_X off=1 (SMOD) */
+ {
+ "ALU_SMOD_X: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG_OFF(BPF_MOD, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU | BPF_MOD | BPF_K off=1 (SMOD) */
+ {
+ "ALU_SMOD_K: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU32_IMM_OFF(BPF_MOD, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU64 | BPF_MOD | BPF_X off=1 (SMOD64) */
+ {
+ "ALU64_SMOD_X: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG_OFF(BPF_MOD, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU64 | BPF_MOD | BPF_K off=1 (SMOD64) */
+ {
+ "ALU64_SMOD_X: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU64_IMM_OFF(BPF_MOD, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
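The off=1 encodings exercised above are expected to follow C's native signed semantics: division truncates toward zero and the remainder takes the dividend's sign. A reference sketch:

static __s64 sdiv(__s64 a, __s64 b)
{
	return a / b;	/* -6 / 2 == -3 */
}

static __s64 smod(__s64 a, __s64 b)
{
	return a % b;	/* -7 % 2 == -1 */
}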
/* BPF_ALU | BPF_AND | BPF_X */
{
"ALU_AND_X: 3 & 2 = 2",
@@ -7837,6 +8035,104 @@ static struct bpf_test tests[] = {
{ },
{ { 0, (u32) (cpu_to_le64(0xfedcba9876543210ULL) >> 32) } },
},
+ /* BSWAP */
+ {
+ "BSWAP 16: 0x0123456789abcdef -> 0xefcd",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xefcd } },
+ },
+ {
+ "BSWAP 32: 0x0123456789abcdef -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xefcdab89 } },
+ },
+ {
+ "BSWAP 64: 0x0123456789abcdef -> 0x67452301",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x67452301 } },
+ },
+ {
+ "BSWAP 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xefcdab89 } },
+ },
+ /* BSWAP, reversed */
+ {
+ "BSWAP 16: 0xfedcba9876543210 -> 0x1032",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1032 } },
+ },
+ {
+ "BSWAP 32: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x10325476 } },
+ },
+ {
+ "BSWAP 64: 0xfedcba9876543210 -> 0x98badcfe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x98badcfe } },
+ },
+ {
+ "BSWAP 64: 0xfedcba9876543210 >> 32 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x10325476 } },
+ },
/* BPF_LDX_MEM B/H/W/DW */
{
"BPF_LDX_MEM | BPF_B, base",
@@ -8228,6 +8524,67 @@ static struct bpf_test tests[] = {
{ { 32, 0 } },
.stack_depth = 0,
},
+ /* BPF_LDX_MEMSX B/H/W */
+ {
+ "BPF_LDX_MEMSX | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0xdead0000000000f0ULL),
+ BPF_LD_IMM64(R2, 0xfffffffffffffff0ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEMSX(BPF_B, R0, R10, -1),
+#else
+ BPF_LDX_MEMSX(BPF_B, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEMSX | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0xdead00000000f123ULL),
+ BPF_LD_IMM64(R2, 0xfffffffffffff123ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEMSX(BPF_H, R0, R10, -2),
+#else
+ BPF_LDX_MEMSX(BPF_H, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEMSX | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x00000000deadbeefULL),
+ BPF_LD_IMM64(R2, 0xffffffffdeadbeefULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEMSX(BPF_W, R0, R10, -4),
+#else
+ BPF_LDX_MEMSX(BPF_W, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
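A note on the #ifdef __BIG_ENDIAN offsets above (explanatory sketch of the first test's stack layout): the 8-byte store at fp-8 places the value's least significant byte at fp-8 on little-endian but at fp-1 on big-endian, so each MEMSX load picks the offset that reaches the low byte/half/word it wants to sign-extend.

/* u64 0xdead0000000000f0 stored at fp-8:
 *   little-endian: fp-8 = 0xf0, ..., fp-1 = 0xde
 *   big-endian:    fp-8 = 0xde, ..., fp-1 = 0xf0
 */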
/* BPF_STX_MEM B/H/W/DW */
{
"BPF_STX_MEM | BPF_B",
@@ -9474,6 +9831,20 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP32 | BPF_JA */
+ {
+ "JMP32_JA: Unconditional jump: if (true) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_JMP32_IMM(BPF_JA, 0, 1, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JSLT | BPF_K */
{
"JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d37831b8511c..8b06bab5c406 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -562,7 +562,6 @@ void kasan_restore_multi_shot(bool enabled);
* code. Declared here to avoid warnings about missing declarations.
*/
-asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
void __asan_register_globals(void *globals, ssize_t size);
void __asan_unregister_globals(void *globals, ssize_t size);
void __asan_handle_no_return(void);
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index c7236daa2415..9fa0b246902b 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -664,7 +664,7 @@ out_unlock:
sendit:
if (skb->sk)
- skb->priority = skb->sk->sk_priority;
+ skb->priority = READ_ONCE(skb->sk->sk_priority);
if (dev_queue_xmit(skb))
goto drop;
sent:
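This and the matching ax25, l2cap, j1939 and can-raw conversions below all follow the same lockless pattern now that sk_priority is updated without the socket lock (see the sock_set_priority() change further down). A generic sketch with hypothetical helper names:

static void sk_priority_store(struct sock *sk, u32 val)
{
	WRITE_ONCE(sk->sk_priority, val);	/* writer: no socket lock */
}

static u32 sk_priority_load(const struct sock *sk)
{
	/* reader: annotated so the compiler cannot tear or re-load it */
	return READ_ONCE(sk->sk_priority);
}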
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 5db805d5f74d..558e158c98d0 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -939,7 +939,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
sock_init_data(NULL, sk);
sk->sk_type = osk->sk_type;
- sk->sk_priority = osk->sk_priority;
+ sk->sk_priority = READ_ONCE(osk->sk_priority);
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 3bdfc3f1e73d..e50d3d102078 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1615,7 +1615,7 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
return ERR_PTR(-ENOTCONN);
}
- skb->priority = sk->sk_priority;
+ skb->priority = READ_ONCE(sk->sk_priority);
bt_cb(skb)->l2cap.chan = chan;
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index b28c976f52a0..14c431663233 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -884,7 +884,7 @@ static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
skcb = j1939_skb_to_cb(skb);
memset(skcb, 0, sizeof(*skcb));
skcb->addr = jsk->addr;
- skcb->priority = j1939_prio(sk->sk_priority);
+ skcb->priority = j1939_prio(READ_ONCE(sk->sk_priority));
if (msg->msg_name) {
struct sockaddr_can *addr = msg->msg_name;
diff --git a/net/can/raw.c b/net/can/raw.c
index d50c3f3d892f..73468d2ebd51 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -881,7 +881,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
skb->dev = dev;
- skb->priority = sk->sk_priority;
+ skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = READ_ONCE(sk->sk_mark);
skb->tstamp = sockc.transmit_time;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index faabad6603db..f263f7e91a21 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1136,6 +1136,7 @@ static int build_initial_monmap(struct ceph_mon_client *monc)
GFP_KERNEL);
if (!monc->monmap)
return -ENOMEM;
+ monc->monmap->num_mon = num_mon;
for (i = 0; i < num_mon; i++) {
struct ceph_entity_inst *inst = &monc->monmap->mon_inst[i];
@@ -1147,7 +1148,6 @@ static int build_initial_monmap(struct ceph_mon_client *monc)
inst->name.type = CEPH_ENTITY_TYPE_MON;
inst->name.num = cpu_to_le64(i);
}
- monc->monmap->num_mon = num_mon;
return 0;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 85df22f05c38..606a366cc209 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -9023,6 +9023,28 @@ bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
}
EXPORT_SYMBOL(netdev_port_same_parent_id);
+static void netdev_dpll_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+#if IS_ENABLED(CONFIG_DPLL)
+ rtnl_lock();
+ dev->dpll_pin = dpll_pin;
+ rtnl_unlock();
+#endif
+}
+
+void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+ WARN_ON(!dpll_pin);
+ netdev_dpll_pin_assign(dev, dpll_pin);
+}
+EXPORT_SYMBOL(netdev_dpll_pin_set);
+
+void netdev_dpll_pin_clear(struct net_device *dev)
+{
+ netdev_dpll_pin_assign(dev, NULL);
+}
+EXPORT_SYMBOL(netdev_dpll_pin_clear);
+
/**
* dev_change_proto_down - set carrier according to proto_down.
*
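Intended driver-side usage of the two helpers exported above, sketched with hypothetical driver code:

static int example_probe(struct net_device *netdev, struct dpll_pin *pin)
{
	/* after 'pin' has been registered with the dpll subsystem */
	netdev_dpll_pin_set(netdev, pin);
	return 0;
}

static void example_remove(struct net_device *netdev)
{
	/* before 'pin' itself is unregistered */
	netdev_dpll_pin_clear(netdev);
}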
diff --git a/net/core/dst.c b/net/core/dst.c
index 980e2fd2f013..6838d3212c37 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -45,7 +45,7 @@ const struct dst_metrics dst_default_metrics = {
EXPORT_SYMBOL(dst_default_metrics);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
- struct net_device *dev, int initial_ref, int initial_obsolete,
+ struct net_device *dev, int initial_obsolete,
unsigned short flags)
{
dst->dev = dev;
@@ -66,7 +66,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
dst->tclassid = 0;
#endif
dst->lwtstate = NULL;
- rcuref_init(&dst->__rcuref, initial_ref);
+ rcuref_init(&dst->__rcuref, 1);
INIT_LIST_HEAD(&dst->rt_uncached);
dst->__use = 0;
dst->lastuse = jiffies;
@@ -77,7 +77,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
EXPORT_SYMBOL(dst_init);
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
- int initial_ref, int initial_obsolete, unsigned short flags)
+ int initial_obsolete, unsigned short flags)
{
struct dst_entry *dst;
@@ -90,7 +90,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
if (!dst)
return NULL;
- dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);
+ dst_init(dst, ops, dev, initial_obsolete, flags);
return dst;
}
@@ -270,7 +270,7 @@ static void __metadata_dst_init(struct metadata_dst *md_dst,
struct dst_entry *dst;
dst = &md_dst->dst;
- dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
+ dst_init(dst, &dst_blackhole_ops, NULL, DST_OBSOLETE_NONE,
DST_METADATA | DST_NOCOUNT);
memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
md_dst->type = type;
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index c1aea8b756b6..fe61f85bcf33 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -5,6 +5,7 @@
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
+#include <net/xdp.h>
#include "netdev-genl-gen.h"
@@ -12,15 +13,24 @@ static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
const struct genl_info *info)
{
+ u64 xdp_rx_meta = 0;
void *hdr;
hdr = genlmsg_iput(rsp, info);
if (!hdr)
return -EMSGSIZE;
+#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
+ if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
+ xdp_rx_meta |= flag;
+XDP_METADATA_KFUNC_xxx
+#undef XDP_METADATA_KFUNC
+
if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
- netdev->xdp_features, NETDEV_A_DEV_PAD)) {
+ netdev->xdp_features, NETDEV_A_DEV_PAD) ||
+ nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
+ xdp_rx_meta, NETDEV_A_DEV_PAD)) {
genlmsg_cancel(rsp, hdr);
return -EINVAL;
}
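For a single entry of XDP_METADATA_KFUNC_xxx, e.g. the rx-timestamp one (assuming the entry layout in include/net/xdp.h), the #define above expands to:

if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo_rx_timestamp)
	xdp_rx_meta |= NETDEV_XDP_RX_METADATA_TIMESTAMP;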
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f56b8d697014..5e865af82e5b 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -200,6 +200,7 @@
pf(VID_RND) /* Random VLAN ID */ \
pf(SVID_RND) /* Random SVLAN ID */ \
pf(NODE) /* Node memory alloc*/ \
+ pf(SHARED) /* Shared SKB */ \
#define pf(flag) flag##_SHIFT,
enum pkt_flags {
@@ -1198,7 +1199,8 @@ static ssize_t pktgen_if_write(struct file *file,
((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
return -ENOTSUPP;
- if (value > 0 && pkt_dev->n_imix_entries > 0)
+ if (value > 0 && (pkt_dev->n_imix_entries > 0 ||
+ !(pkt_dev->flags & F_SHARED)))
return -EINVAL;
i += len;
@@ -1257,6 +1259,10 @@ static ssize_t pktgen_if_write(struct file *file,
((pkt_dev->xmit_mode == M_START_XMIT) &&
(!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))))
return -ENOTSUPP;
+
+ if (value > 1 && !(pkt_dev->flags & F_SHARED))
+ return -EINVAL;
+
pkt_dev->burst = value < 1 ? 1 : value;
sprintf(pg_result, "OK: burst=%u", pkt_dev->burst);
return count;
@@ -1318,9 +1324,10 @@ static ssize_t pktgen_if_write(struct file *file,
return count;
}
if (!strcmp(name, "flag")) {
+ bool disable = false;
__u32 flag;
char f[32];
- bool disable = false;
+ char *end;
memset(f, 0, 32);
len = strn_len(&user_buffer[i], sizeof(f) - 1);
@@ -1332,28 +1339,42 @@ static ssize_t pktgen_if_write(struct file *file,
i += len;
flag = pktgen_read_flag(f, &disable);
-
if (flag) {
- if (disable)
+ if (disable) {
+ /* If "clone_skb", or "burst" parameters are
+ * configured, it means that the skb still
+ * needs to be referenced by the pktgen, so
+ * the skb must be shared.
+ */
+ if (flag == F_SHARED && (pkt_dev->clone_skb ||
+ pkt_dev->burst > 1))
+ return -EINVAL;
pkt_dev->flags &= ~flag;
- else
+ } else {
pkt_dev->flags |= flag;
- } else {
- sprintf(pg_result,
- "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
- f,
- "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
- "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, "
- "MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, "
- "QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, "
- "NO_TIMESTAMP, "
-#ifdef CONFIG_XFRM
- "IPSEC, "
-#endif
- "NODE_ALLOC\n");
+ }
+
+ sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
return count;
}
- sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
+
+ /* Unknown flag */
+ end = pkt_dev->result + sizeof(pkt_dev->result);
+ pg_result += sprintf(pg_result,
+ "Flag -:%s:- unknown\n"
+ "Available flags, (prepend ! to un-set flag):\n", f);
+
+ for (int n = 0; n < NR_PKT_FLAGS && pg_result < end; n++) {
+ if (!IS_ENABLED(CONFIG_XFRM) && n == IPSEC_SHIFT)
+ continue;
+ pg_result += snprintf(pg_result, end - pg_result,
+ "%s, ", pkt_flag_names[n]);
+ }
+ if (!WARN_ON_ONCE(pg_result >= end)) {
+ /* Remove the comma and whitespace at the end */
+ *(pg_result - 2) = '\0';
+ }
+
return count;
}
if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
@@ -3440,12 +3461,24 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
- unsigned int burst = READ_ONCE(pkt_dev->burst);
+ bool skb_shared = !!(READ_ONCE(pkt_dev->flags) & F_SHARED);
struct net_device *odev = pkt_dev->odev;
struct netdev_queue *txq;
+ unsigned int burst = 1;
struct sk_buff *skb;
+ int clone_skb = 0;
int ret;
+ /* If 'skb_shared' is false, skip reading possibly updated values of
+ * 'burst' and 'clone_skb' so that concurrent changes cannot slip in;
+ * the settled configuration will be picked up on the next run of
+ * pktgen_xmit.
+ */
+ if (skb_shared) {
+ burst = READ_ONCE(pkt_dev->burst);
+ clone_skb = READ_ONCE(pkt_dev->clone_skb);
+ }
+
/* If device is offline, then don't send */
if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
pktgen_stop_device(pkt_dev);
@@ -3462,7 +3495,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
/* If no skb or clone count exhausted then get new one */
if (!pkt_dev->skb || (pkt_dev->last_ok &&
- ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
+ ++pkt_dev->clone_count >= clone_skb)) {
/* build a new pkt */
kfree_skb(pkt_dev->skb);
@@ -3483,7 +3516,8 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
skb = pkt_dev->skb;
skb->protocol = eth_type_trans(skb, skb->dev);
- refcount_add(burst, &skb->users);
+ if (skb_shared)
+ refcount_add(burst, &skb->users);
local_bh_disable();
do {
ret = netif_receive_skb(skb);
@@ -3491,6 +3525,10 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->errors++;
pkt_dev->sofar++;
pkt_dev->seq_num++;
+ if (unlikely(!skb_shared)) {
+ pkt_dev->skb = NULL;
+ break;
+ }
if (refcount_read(&skb->users) != burst) {
/* skb was queued by rps/rfs or taps,
* so cannot reuse this skb
@@ -3509,9 +3547,14 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
goto out; /* Skips xmit_mode M_START_XMIT */
} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
local_bh_disable();
- refcount_inc(&pkt_dev->skb->users);
+ if (skb_shared)
+ refcount_inc(&pkt_dev->skb->users);
ret = dev_queue_xmit(pkt_dev->skb);
+
+ if (!skb_shared && dev_xmit_complete(ret))
+ pkt_dev->skb = NULL;
+
switch (ret) {
case NET_XMIT_SUCCESS:
pkt_dev->sofar++;
@@ -3549,11 +3592,15 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->last_ok = 0;
goto unlock;
}
- refcount_add(burst, &pkt_dev->skb->users);
+ if (skb_shared)
+ refcount_add(burst, &pkt_dev->skb->users);
xmit_more:
ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
+ if (!skb_shared && dev_xmit_complete(ret))
+ pkt_dev->skb = NULL;
+
switch (ret) {
case NETDEV_TX_OK:
pkt_dev->last_ok = 1;
@@ -3575,7 +3622,8 @@ xmit_more:
fallthrough;
case NETDEV_TX_BUSY:
/* Retry it next time */
- refcount_dec(&(pkt_dev->skb->users));
+ if (skb_shared)
+ refcount_dec(&pkt_dev->skb->users);
pkt_dev->last_ok = 0;
}
if (unlikely(burst))
@@ -3588,7 +3636,8 @@ out:
/* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
- pktgen_wait_for_skb(pkt_dev);
+ if (pkt_dev->skb)
+ pktgen_wait_for_skb(pkt_dev);
/* Done with this */
pktgen_stop_device(pkt_dev);
@@ -3771,6 +3820,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->svlan_id = 0xffff;
pkt_dev->burst = 1;
pkt_dev->node = NUMA_NO_NODE;
+ pkt_dev->flags = F_SHARED; /* SKB shared by default */
err = pktgen_setup_dev(t->net, pkt_dev, ifname);
if (err)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4a2ec33bfb51..7452a6d190c5 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -57,6 +57,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#endif
+#include <linux/dpll.h>
#include "dev.h"
@@ -1055,6 +1056,15 @@ static size_t rtnl_devlink_port_size(const struct net_device *dev)
return size;
}
+static size_t rtnl_dpll_pin_size(const struct net_device *dev)
+{
+ size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */
+
+ size += dpll_msg_pin_handle_size(netdev_dpll_pin(dev));
+
+ return size;
+}
+
static noinline size_t if_nlmsg_size(const struct net_device *dev,
u32 ext_filter_mask)
{
@@ -1111,6 +1121,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_prop_list_size(dev)
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
+ rtnl_devlink_port_size(dev)
+ + rtnl_dpll_pin_size(dev)
+ 0;
}
@@ -1774,6 +1785,28 @@ nest_cancel:
return ret;
}
+static int rtnl_fill_dpll_pin(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct nlattr *dpll_pin_nest;
+ int ret;
+
+ dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
+ if (!dpll_pin_nest)
+ return -EMSGSIZE;
+
+ ret = dpll_msg_add_pin_handle(skb, netdev_dpll_pin(dev));
+ if (ret < 0)
+ goto nest_cancel;
+
+ nla_nest_end(skb, dpll_pin_nest);
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(skb, dpll_pin_nest);
+ return ret;
+}
+
static int rtnl_fill_ifinfo(struct sk_buff *skb,
struct net_device *dev, struct net *src_net,
int type, u32 pid, u32 seq, u32 change,
@@ -1916,6 +1949,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
if (rtnl_fill_devlink_port(skb, dev))
goto nla_put_failure;
+ if (rtnl_fill_dpll_pin(skb, dev))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4eaf7ed0d1f4..da3f96bdd6f6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -847,6 +847,8 @@ EXPORT_SYMBOL(__napi_alloc_skb);
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize)
{
+ DEBUG_NET_WARN_ON_ONCE(size > truesize);
+
skb_fill_page_desc(skb, i, page, off, size);
skb->len += size;
skb->data_len += size;
@@ -859,6 +861,8 @@ void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ DEBUG_NET_WARN_ON_ONCE(size > truesize);
+
skb_frag_size_add(frag, size);
skb->len += size;
skb->data_len += size;
@@ -3718,10 +3722,19 @@ EXPORT_SYMBOL(skb_dequeue_tail);
void skb_queue_purge_reason(struct sk_buff_head *list,
enum skb_drop_reason reason)
{
- struct sk_buff *skb;
+ struct sk_buff_head tmp;
+ unsigned long flags;
+
+ if (skb_queue_empty_lockless(list))
+ return;
+
+ __skb_queue_head_init(&tmp);
+
+ spin_lock_irqsave(&list->lock, flags);
+ skb_queue_splice_init(list, &tmp);
+ spin_unlock_irqrestore(&list->lock, flags);
- while ((skb = skb_dequeue(list)) != NULL)
- kfree_skb_reason(skb, reason);
+ __skb_queue_purge_reason(&tmp, reason);
}
EXPORT_SYMBOL(skb_queue_purge_reason);
diff --git a/net/core/sock.c b/net/core/sock.c
index 16584e2dd648..290165954379 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -600,7 +600,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
dst, cookie) == NULL) {
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
dst_release(dst);
return NULL;
@@ -759,7 +759,7 @@ out:
return ret;
}
-bool sk_mc_loop(struct sock *sk)
+bool sk_mc_loop(const struct sock *sk)
{
if (dev_recursion_level())
return false;
@@ -771,7 +771,7 @@ bool sk_mc_loop(struct sock *sk)
return inet_test_bit(MC_LOOP, sk);
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- return inet6_sk(sk)->mc_loop;
+ return inet6_test_bit(MC6_LOOP, sk);
#endif
}
WARN_ON_ONCE(1);
@@ -806,9 +806,7 @@ EXPORT_SYMBOL(sock_no_linger);
void sock_set_priority(struct sock *sk, u32 priority)
{
- lock_sock(sk);
WRITE_ONCE(sk->sk_priority, priority);
- release_sock(sk);
}
EXPORT_SYMBOL(sock_set_priority);
@@ -1118,6 +1116,83 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
valbool = val ? 1 : 0;
+ /* handle options which do not require locking the socket. */
+ switch (optname) {
+ case SO_PRIORITY:
+ if ((val >= 0 && val <= 6) ||
+ sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
+ sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+ sock_set_priority(sk, val);
+ return 0;
+ }
+ return -EPERM;
+ case SO_PASSSEC:
+ assign_bit(SOCK_PASSSEC, &sock->flags, valbool);
+ return 0;
+ case SO_PASSCRED:
+ assign_bit(SOCK_PASSCRED, &sock->flags, valbool);
+ return 0;
+ case SO_PASSPIDFD:
+ assign_bit(SOCK_PASSPIDFD, &sock->flags, valbool);
+ return 0;
+ case SO_TYPE:
+ case SO_PROTOCOL:
+ case SO_DOMAIN:
+ case SO_ERROR:
+ return -ENOPROTOOPT;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ case SO_BUSY_POLL:
+ if (val < 0)
+ return -EINVAL;
+ WRITE_ONCE(sk->sk_ll_usec, val);
+ return 0;
+ case SO_PREFER_BUSY_POLL:
+ if (valbool && !sockopt_capable(CAP_NET_ADMIN))
+ return -EPERM;
+ WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
+ return 0;
+ case SO_BUSY_POLL_BUDGET:
+ if (val > READ_ONCE(sk->sk_busy_poll_budget) &&
+ !sockopt_capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (val < 0 || val > U16_MAX)
+ return -EINVAL;
+ WRITE_ONCE(sk->sk_busy_poll_budget, val);
+ return 0;
+#endif
+ case SO_MAX_PACING_RATE:
+ {
+ unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
+ unsigned long pacing_rate;
+
+ if (sizeof(ulval) != sizeof(val) &&
+ optlen >= sizeof(ulval) &&
+ copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
+ return -EFAULT;
+ }
+ if (ulval != ~0UL)
+ cmpxchg(&sk->sk_pacing_status,
+ SK_PACING_NONE,
+ SK_PACING_NEEDED);
+ /* Pairs with READ_ONCE() from sk_getsockopt() */
+ WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
+ pacing_rate = READ_ONCE(sk->sk_pacing_rate);
+ if (ulval < pacing_rate)
+ WRITE_ONCE(sk->sk_pacing_rate, ulval);
+ return 0;
+ }
+ case SO_TXREHASH:
+ if (val < -1 || val > 1)
+ return -EINVAL;
+ if ((u8)val == SOCK_TXREHASH_DEFAULT)
+ val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
+ /* Paired with READ_ONCE() in tcp_rtx_synack()
+ * and sk_getsockopt().
+ */
+ WRITE_ONCE(sk->sk_txrehash, (u8)val);
+ return 0;
+ }
+
sockopt_lock_sock(sk);
switch (optname) {
@@ -1133,12 +1208,6 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
case SO_REUSEPORT:
sk->sk_reuseport = valbool;
break;
- case SO_TYPE:
- case SO_PROTOCOL:
- case SO_DOMAIN:
- case SO_ERROR:
- ret = -ENOPROTOOPT;
- break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
sk_dst_reset(sk);
@@ -1213,15 +1282,6 @@ set_sndbuf:
sk->sk_no_check_tx = valbool;
break;
- case SO_PRIORITY:
- if ((val >= 0 && val <= 6) ||
- sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
- sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
- WRITE_ONCE(sk->sk_priority, val);
- else
- ret = -EPERM;
- break;
-
case SO_LINGER:
if (optlen < sizeof(ling)) {
ret = -EINVAL; /* 1003.1g */
@@ -1247,14 +1307,6 @@ set_sndbuf:
case SO_BSDCOMPAT:
break;
- case SO_PASSCRED:
- assign_bit(SOCK_PASSCRED, &sock->flags, valbool);
- break;
-
- case SO_PASSPIDFD:
- assign_bit(SOCK_PASSPIDFD, &sock->flags, valbool);
- break;
-
case SO_TIMESTAMP_OLD:
case SO_TIMESTAMP_NEW:
case SO_TIMESTAMPNS_OLD:
@@ -1360,9 +1412,6 @@ set_sndbuf:
sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
break;
- case SO_PASSSEC:
- assign_bit(SOCK_PASSSEC, &sock->flags, valbool);
- break;
case SO_MARK:
if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
@@ -1404,50 +1453,7 @@ set_sndbuf:
sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
break;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- case SO_BUSY_POLL:
- if (val < 0)
- ret = -EINVAL;
- else
- WRITE_ONCE(sk->sk_ll_usec, val);
- break;
- case SO_PREFER_BUSY_POLL:
- if (valbool && !sockopt_capable(CAP_NET_ADMIN))
- ret = -EPERM;
- else
- WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
- break;
- case SO_BUSY_POLL_BUDGET:
- if (val > READ_ONCE(sk->sk_busy_poll_budget) && !sockopt_capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- } else {
- if (val < 0 || val > U16_MAX)
- ret = -EINVAL;
- else
- WRITE_ONCE(sk->sk_busy_poll_budget, val);
- }
- break;
-#endif
-
- case SO_MAX_PACING_RATE:
- {
- unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
- if (sizeof(ulval) != sizeof(val) &&
- optlen >= sizeof(ulval) &&
- copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
- ret = -EFAULT;
- break;
- }
- if (ulval != ~0UL)
- cmpxchg(&sk->sk_pacing_status,
- SK_PACING_NONE,
- SK_PACING_NEEDED);
- /* Pairs with READ_ONCE() from sk_getsockopt() */
- WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
- sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
- break;
- }
case SO_INCOMING_CPU:
reuseport_update_incoming_cpu(sk, val);
break;
@@ -1532,19 +1538,6 @@ set_sndbuf:
break;
}
- case SO_TXREHASH:
- if (val < -1 || val > 1) {
- ret = -EINVAL;
- break;
- }
- if ((u8)val == SOCK_TXREHASH_DEFAULT)
- val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
- /* Paired with READ_ONCE() in tcp_rtx_synack()
- * and sk_getsockopt().
- */
- WRITE_ONCE(sk->sk_txrehash, (u8)val);
- break;
-
default:
ret = -ENOPROTOOPT;
break;
@@ -3001,6 +2994,11 @@ void __sk_flush_backlog(struct sock *sk)
{
spin_lock_bh(&sk->sk_lock.slock);
__release_sock(sk);
+
+ if (sk->sk_prot->release_cb)
+ INDIRECT_CALL_INET_1(sk->sk_prot->release_cb,
+ tcp_release_cb, sk);
+
spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL_GPL(__sk_flush_backlog);
@@ -3519,11 +3517,9 @@ void release_sock(struct sock *sk)
if (sk->sk_backlog.tail)
__release_sock(sk);
- /* Warning : release_cb() might need to release sk ownership,
- * ie call sock_release_ownership(sk) before us.
- */
if (sk->sk_prot->release_cb)
- sk->sk_prot->release_cb(sk);
+ INDIRECT_CALL_INET_1(sk->sk_prot->release_cb,
+ tcp_release_cb, sk);
sock_release_ownership(sk);
if (waitqueue_active(&sk->sk_lock.wq))
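
The sk_setsockopt() hunks above hoist single-field options into a lockless fast path that runs before sockopt_lock_sock(). The pattern depends on paired WRITE_ONCE()/READ_ONCE() annotations so both sides are single, untorn accesses. A minimal sketch, using sk_priority purely as an example field:

static void set_example(struct sock *sk, u32 val)
{
	/* lockless store; pairs with READ_ONCE() in any reader */
	WRITE_ONCE(sk->sk_priority, val);
}

static u32 get_example(const struct sock *sk)
{
	/* may run concurrently on another CPU, no socket lock held */
	return READ_ONCE(sk->sk_priority);
}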
diff --git a/net/core/xdp.c b/net/core/xdp.c
index a70670fe9a2d..df4789ab512d 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -741,7 +741,7 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
__diag_pop();
BTF_SET8_START(xdp_metadata_kfunc_ids)
-#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
+#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
BTF_SET8_END(xdp_metadata_kfunc_ids)
@@ -752,7 +752,7 @@ static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
};
BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
-#define XDP_METADATA_KFUNC(name, str) BTF_ID(func, str)
+#define XDP_METADATA_KFUNC(name, _, str, __) BTF_ID(func, str)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
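
The XDP_METADATA_KFUNC table grows from two columns to four, and each expansion site binds only the columns it needs. A generic X-macro sketch of the technique; the names are illustrative, not the kernel's actual metadata table:

#define EXAMPLE_FUNCS \
	EXAMPLE_FUNC(TIMESTAMP, 0, example_rx_timestamp, unused) \
	EXAMPLE_FUNC(HASH, 1, example_rx_hash, unused)

/* this expansion consumes only the third column */
#define EXAMPLE_FUNC(id, idx, name, extra) void name(void);
EXAMPLE_FUNCS
#undef EXAMPLE_FUNC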
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 69453b936bd5..1b8cbfda6e5d 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -511,7 +511,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
rcu_dereference(ireq->ireq_opt),
- inet_sk(sk)->tos);
+ READ_ONCE(inet_sk(sk)->tos));
rcu_read_unlock();
err = net_xmit_eval(err);
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index c693a570682f..8d344b219f84 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -180,7 +180,7 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out;
}
- if (!sock_owned_by_user(sk) && np->recverr) {
+ if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
sk->sk_err = err;
sk_error_report(sk);
} else {
@@ -239,7 +239,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
if (!opt)
opt = rcu_dereference(np->opt);
err = ip6_xmit(sk, skb, &fl6, READ_ONCE(sk->sk_mark), opt,
- np->tclass, sk->sk_priority);
+ np->tclass, READ_ONCE(sk->sk_priority));
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -671,10 +671,10 @@ ipv6_pktoptions:
if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
np->mcast_oif = inet6_iif(opt_skb);
if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
- np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
+ WRITE_ONCE(np->mcast_hops, ipv6_hdr(opt_skb)->hop_limit);
if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
- if (np->repflow)
+ if (inet6_test_bit(REPFLOW, sk))
np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
if (ipv6_opt_accepted(sk, opt_skb,
&DCCP_SKB_CB(opt_skb)->header.h6)) {
@@ -839,7 +839,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
memset(&fl6, 0, sizeof(fl6));
- if (np->sndflow) {
+ if (inet6_test_bit(SNDFLOW, sk)) {
fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
IP6_ECN_flow_init(fl6.flowlabel);
if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
diff --git a/net/devlink/core.c b/net/devlink/core.c
index 6cec4afb01fb..bcbbb952569f 100644
--- a/net/devlink/core.c
+++ b/net/devlink/core.c
@@ -16,6 +16,219 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);
DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
+static struct devlink *devlinks_xa_get(unsigned long index)
+{
+ struct devlink *devlink;
+
+ rcu_read_lock();
+ devlink = xa_find(&devlinks, &index, index, DEVLINK_REGISTERED);
+ if (!devlink || !devlink_try_get(devlink))
+ devlink = NULL;
+ rcu_read_unlock();
+ return devlink;
+}
+
+/* devlink_rels xarray contains 1:1 relationships between
+ * devlink object and related nested devlink instance.
+ * The xarray index is used to get the nested object from
+ * the nested-in object code.
+ */
+static DEFINE_XARRAY_FLAGS(devlink_rels, XA_FLAGS_ALLOC1);
+
+#define DEVLINK_REL_IN_USE XA_MARK_0
+
+struct devlink_rel {
+ u32 index;
+ refcount_t refcount;
+ u32 devlink_index;
+ struct {
+ u32 devlink_index;
+ u32 obj_index;
+ devlink_rel_notify_cb_t *notify_cb;
+ devlink_rel_cleanup_cb_t *cleanup_cb;
+ struct work_struct notify_work;
+ } nested_in;
+};
+
+static void devlink_rel_free(struct devlink_rel *rel)
+{
+ xa_erase(&devlink_rels, rel->index);
+ kfree(rel);
+}
+
+static void __devlink_rel_get(struct devlink_rel *rel)
+{
+ refcount_inc(&rel->refcount);
+}
+
+static void __devlink_rel_put(struct devlink_rel *rel)
+{
+ if (refcount_dec_and_test(&rel->refcount))
+ devlink_rel_free(rel);
+}
+
+static void devlink_rel_nested_in_notify_work(struct work_struct *work)
+{
+ struct devlink_rel *rel = container_of(work, struct devlink_rel,
+ nested_in.notify_work);
+ struct devlink *devlink;
+
+ devlink = devlinks_xa_get(rel->nested_in.devlink_index);
+ if (!devlink)
+ goto rel_put;
+ if (!devl_trylock(devlink)) {
+ devlink_put(devlink);
+ goto reschedule_work;
+ }
+ if (!devl_is_registered(devlink)) {
+ devl_unlock(devlink);
+ devlink_put(devlink);
+ goto rel_put;
+ }
+ if (!xa_get_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE))
+ rel->nested_in.cleanup_cb(devlink, rel->nested_in.obj_index, rel->index);
+ rel->nested_in.notify_cb(devlink, rel->nested_in.obj_index);
+ devl_unlock(devlink);
+ devlink_put(devlink);
+
+rel_put:
+ __devlink_rel_put(rel);
+ return;
+
+reschedule_work:
+ schedule_work(&rel->nested_in.notify_work);
+}
+
+static void devlink_rel_nested_in_notify_work_schedule(struct devlink_rel *rel)
+{
+ __devlink_rel_get(rel);
+ schedule_work(&rel->nested_in.notify_work);
+}
+
+static struct devlink_rel *devlink_rel_alloc(void)
+{
+ struct devlink_rel *rel;
+ static u32 next;
+ int err;
+
+ rel = kzalloc(sizeof(*rel), GFP_KERNEL);
+ if (!rel)
+ return ERR_PTR(-ENOMEM);
+
+ err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel,
+ xa_limit_32b, &next, GFP_KERNEL);
+ if (err) {
+ kfree(rel);
+ return ERR_PTR(err);
+ }
+
+ refcount_set(&rel->refcount, 1);
+ INIT_WORK(&rel->nested_in.notify_work,
+ &devlink_rel_nested_in_notify_work);
+ return rel;
+}
+
+static void devlink_rel_put(struct devlink *devlink)
+{
+ struct devlink_rel *rel = devlink->rel;
+
+ if (!rel)
+ return;
+ xa_clear_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE);
+ devlink_rel_nested_in_notify_work_schedule(rel);
+ __devlink_rel_put(rel);
+ devlink->rel = NULL;
+}
+
+void devlink_rel_nested_in_clear(u32 rel_index)
+{
+ xa_clear_mark(&devlink_rels, rel_index, DEVLINK_REL_IN_USE);
+}
+
+int devlink_rel_nested_in_add(u32 *rel_index, u32 devlink_index,
+ u32 obj_index, devlink_rel_notify_cb_t *notify_cb,
+ devlink_rel_cleanup_cb_t *cleanup_cb,
+ struct devlink *devlink)
+{
+ struct devlink_rel *rel = devlink_rel_alloc();
+
+ ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+
+ if (IS_ERR(rel))
+ return PTR_ERR(rel);
+
+ rel->devlink_index = devlink->index;
+ rel->nested_in.devlink_index = devlink_index;
+ rel->nested_in.obj_index = obj_index;
+ rel->nested_in.notify_cb = notify_cb;
+ rel->nested_in.cleanup_cb = cleanup_cb;
+ *rel_index = rel->index;
+ xa_set_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE);
+ devlink->rel = rel;
+ return 0;
+}
+
+void devlink_rel_nested_in_notify(struct devlink *devlink)
+{
+ struct devlink_rel *rel = devlink->rel;
+
+ if (!rel)
+ return;
+ devlink_rel_nested_in_notify_work_schedule(rel);
+}
+
+static struct devlink_rel *devlink_rel_find(unsigned long rel_index)
+{
+ return xa_find(&devlink_rels, &rel_index, rel_index,
+ DEVLINK_REL_IN_USE);
+}
+
+static struct devlink *devlink_rel_devlink_get_lock(u32 rel_index)
+{
+ struct devlink *devlink;
+ struct devlink_rel *rel;
+ u32 devlink_index;
+
+ if (!rel_index)
+ return NULL;
+ xa_lock(&devlink_rels);
+ rel = devlink_rel_find(rel_index);
+ if (rel)
+ devlink_index = rel->devlink_index;
+ xa_unlock(&devlink_rels);
+ if (!rel)
+ return NULL;
+ devlink = devlinks_xa_get(devlink_index);
+ if (!devlink)
+ return NULL;
+ devl_lock(devlink);
+ if (!devl_is_registered(devlink)) {
+ devl_unlock(devlink);
+ devlink_put(devlink);
+ return NULL;
+ }
+ return devlink;
+}
+
+int devlink_rel_devlink_handle_put(struct sk_buff *msg, struct devlink *devlink,
+ u32 rel_index, int attrtype,
+ bool *msg_updated)
+{
+ struct net *net = devlink_net(devlink);
+ struct devlink *rel_devlink;
+ int err;
+
+ rel_devlink = devlink_rel_devlink_get_lock(rel_index);
+ if (!rel_devlink)
+ return 0;
+ err = devlink_nl_put_nested_handle(msg, net, rel_devlink, attrtype);
+ devl_unlock(rel_devlink);
+ devlink_put(rel_devlink);
+ if (!err && msg_updated)
+ *msg_updated = true;
+ return err;
+}
+
void *devlink_priv(struct devlink *devlink)
{
return &devlink->priv;
@@ -142,6 +355,7 @@ int devl_register(struct devlink *devlink)
xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
devlink_notify_register(devlink);
+ devlink_rel_nested_in_notify(devlink);
return 0;
}
@@ -166,6 +380,7 @@ void devl_unregister(struct devlink *devlink)
devlink_notify_unregister(devlink);
xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
+ devlink_rel_put(devlink);
}
EXPORT_SYMBOL_GPL(devl_unregister);
@@ -215,6 +430,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
xa_init_flags(&devlink->params, XA_FLAGS_ALLOC);
xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
+ xa_init_flags(&devlink->nested_rels, XA_FLAGS_ALLOC);
write_pnet(&devlink->_net, net);
INIT_LIST_HEAD(&devlink->rate_list);
INIT_LIST_HEAD(&devlink->linecard_list);
@@ -261,6 +477,7 @@ void devlink_free(struct devlink *devlink)
WARN_ON(!list_empty(&devlink->linecard_list));
WARN_ON(!xa_empty(&devlink->ports));
+ xa_destroy(&devlink->nested_rels);
xa_destroy(&devlink->snapshot_ids);
xa_destroy(&devlink->params);
xa_destroy(&devlink->ports);
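
devlink_rel_alloc() hands out indices with xa_alloc_cyclic() over an XA_FLAGS_ALLOC1 xarray: IDs start at 1, so a rel_index of 0 can mean "no relationship", and a freed index is not immediately handed out again to confuse a stale holder. A reduced sketch of the allocation pattern, with stand-in names:

static DEFINE_XARRAY_FLAGS(example_objs, XA_FLAGS_ALLOC1);

static int example_obj_insert(void *obj, u32 *out_id)
{
	static u32 next;	/* where the previous search ended */

	/* cycles through the 32-bit ID space before any reuse;
	 * returns negative on error, 0 or 1 (wrapped) on success
	 */
	return xa_alloc_cyclic(&example_objs, out_id, obj,
			       xa_limit_32b, &next, GFP_KERNEL);
}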
diff --git a/net/devlink/dev.c b/net/devlink/dev.c
index bba4ace7d22b..dc8039ca2b38 100644
--- a/net/devlink/dev.c
+++ b/net/devlink/dev.c
@@ -138,6 +138,23 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int devlink_nl_nested_fill(struct sk_buff *msg, struct devlink *devlink)
+{
+ unsigned long rel_index;
+ void *unused;
+ int err;
+
+ xa_for_each(&devlink->nested_rels, rel_index, unused) {
+ err = devlink_rel_devlink_handle_put(msg, devlink,
+ rel_index,
+ DEVLINK_ATTR_NESTED_DEVLINK,
+ NULL);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
enum devlink_command cmd, u32 portid,
u32 seq, int flags)
@@ -164,6 +181,10 @@ static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
goto dev_stats_nest_cancel;
nla_nest_end(msg, dev_stats);
+
+ if (devlink_nl_nested_fill(msg, devlink))
+ goto nla_put_failure;
+
genlmsg_end(msg, hdr);
return 0;
@@ -230,6 +251,34 @@ int devlink_nl_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
return devlink_nl_dumpit(msg, cb, devlink_nl_get_dump_one);
}
+static void devlink_rel_notify_cb(struct devlink *devlink, u32 obj_index)
+{
+ devlink_notify(devlink, DEVLINK_CMD_NEW);
+}
+
+static void devlink_rel_cleanup_cb(struct devlink *devlink, u32 obj_index,
+ u32 rel_index)
+{
+ xa_erase(&devlink->nested_rels, rel_index);
+}
+
+int devl_nested_devlink_set(struct devlink *devlink,
+ struct devlink *nested_devlink)
+{
+ u32 rel_index;
+ int err;
+
+ err = devlink_rel_nested_in_add(&rel_index, devlink->index, 0,
+ devlink_rel_notify_cb,
+ devlink_rel_cleanup_cb,
+ nested_devlink);
+ if (err)
+ return err;
+ return xa_insert(&devlink->nested_rels, rel_index,
+ xa_mk_value(0), GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(devl_nested_devlink_set);
+
void devlink_notify_register(struct devlink *devlink)
{
devlink_notify(devlink, DEVLINK_CMD_NEW);
@@ -372,6 +421,7 @@ static void devlink_reload_netns_change(struct devlink *devlink,
devlink_notify_unregister(devlink);
write_pnet(&devlink->_net, dest_net);
devlink_notify_register(devlink);
+ devlink_rel_nested_in_notify(devlink);
}
int devlink_reload(struct devlink *devlink, struct net *dest_net,
diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h
index f6b5fea2e13c..741d1bf1bec8 100644
--- a/net/devlink/devl_internal.h
+++ b/net/devlink/devl_internal.h
@@ -17,6 +17,8 @@
#include "netlink_gen.h"
+struct devlink_rel;
+
#define DEVLINK_REGISTERED XA_MARK_1
#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
@@ -55,6 +57,8 @@ struct devlink {
u8 reload_failed:1;
refcount_t refcount;
struct rcu_work rwork;
+ struct devlink_rel *rel;
+ struct xarray nested_rels;
char priv[] __aligned(NETDEV_ALIGN);
};
@@ -92,6 +96,20 @@ static inline bool devl_is_registered(struct devlink *devlink)
return xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
}
+typedef void devlink_rel_notify_cb_t(struct devlink *devlink, u32 obj_index);
+typedef void devlink_rel_cleanup_cb_t(struct devlink *devlink, u32 obj_index,
+ u32 rel_index);
+
+void devlink_rel_nested_in_clear(u32 rel_index);
+int devlink_rel_nested_in_add(u32 *rel_index, u32 devlink_index,
+ u32 obj_index, devlink_rel_notify_cb_t *notify_cb,
+ devlink_rel_cleanup_cb_t *cleanup_cb,
+ struct devlink *devlink);
+void devlink_rel_nested_in_notify(struct devlink *devlink);
+int devlink_rel_devlink_handle_put(struct sk_buff *msg, struct devlink *devlink,
+ u32 rel_index, int attrtype,
+ bool *msg_updated);
+
/* Netlink */
#define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1)
@@ -145,6 +163,8 @@ devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
return 0;
}
+int devlink_nl_put_nested_handle(struct sk_buff *msg, struct net *net,
+ struct devlink *devlink, int attrtype);
int devlink_nl_msg_reply_and_new(struct sk_buff **msg, struct genl_info *info);
/* Notify */
@@ -206,19 +226,7 @@ int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
/* Linecards */
-struct devlink_linecard {
- struct list_head list;
- struct devlink *devlink;
- unsigned int index;
- const struct devlink_linecard_ops *ops;
- void *priv;
- enum devlink_linecard_state state;
- struct mutex state_lock; /* Protects state */
- const char *type;
- struct devlink_linecard_type *types;
- unsigned int types_count;
- struct devlink *nested_devlink;
-};
+unsigned int devlink_linecard_index(struct devlink_linecard *linecard);
/* Devlink nl cmds */
int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/devlink/linecard.c b/net/devlink/linecard.c
index 85c32c314b0f..9ff1813f88c5 100644
--- a/net/devlink/linecard.c
+++ b/net/devlink/linecard.c
@@ -6,6 +6,25 @@
#include "devl_internal.h"
+struct devlink_linecard {
+ struct list_head list;
+ struct devlink *devlink;
+ unsigned int index;
+ const struct devlink_linecard_ops *ops;
+ void *priv;
+ enum devlink_linecard_state state;
+ struct mutex state_lock; /* Protects state */
+ const char *type;
+ struct devlink_linecard_type *types;
+ unsigned int types_count;
+ u32 rel_index;
+};
+
+unsigned int devlink_linecard_index(struct devlink_linecard *linecard)
+{
+ return linecard->index;
+}
+
static struct devlink_linecard *
devlink_linecard_get_by_index(struct devlink *devlink,
unsigned int linecard_index)
@@ -46,24 +65,6 @@ devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info)
return devlink_linecard_get_from_attrs(devlink, info->attrs);
}
-static int devlink_nl_put_nested_handle(struct sk_buff *msg, struct devlink *devlink)
-{
- struct nlattr *nested_attr;
-
- nested_attr = nla_nest_start(msg, DEVLINK_ATTR_NESTED_DEVLINK);
- if (!nested_attr)
- return -EMSGSIZE;
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
-
- nla_nest_end(msg, nested_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(msg, nested_attr);
- return -EMSGSIZE;
-}
-
struct devlink_linecard_type {
const char *type;
const void *priv;
@@ -111,8 +112,10 @@ static int devlink_nl_linecard_fill(struct sk_buff *msg,
nla_nest_end(msg, attr);
}
- if (linecard->nested_devlink &&
- devlink_nl_put_nested_handle(msg, linecard->nested_devlink))
+ if (devlink_rel_devlink_handle_put(msg, devlink,
+ linecard->rel_index,
+ DEVLINK_ATTR_NESTED_DEVLINK,
+ NULL))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -521,7 +524,6 @@ EXPORT_SYMBOL_GPL(devlink_linecard_provision_set);
void devlink_linecard_provision_clear(struct devlink_linecard *linecard)
{
mutex_lock(&linecard->state_lock);
- WARN_ON(linecard->nested_devlink);
linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
linecard->type = NULL;
devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
@@ -540,7 +542,6 @@ EXPORT_SYMBOL_GPL(devlink_linecard_provision_clear);
void devlink_linecard_provision_fail(struct devlink_linecard *linecard)
{
mutex_lock(&linecard->state_lock);
- WARN_ON(linecard->nested_devlink);
linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING_FAILED;
devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
mutex_unlock(&linecard->state_lock);
@@ -588,6 +589,27 @@ void devlink_linecard_deactivate(struct devlink_linecard *linecard)
}
EXPORT_SYMBOL_GPL(devlink_linecard_deactivate);
+static void devlink_linecard_rel_notify_cb(struct devlink *devlink,
+ u32 linecard_index)
+{
+ struct devlink_linecard *linecard;
+
+ linecard = devlink_linecard_get_by_index(devlink, linecard_index);
+ if (!linecard)
+ return;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+}
+
+static void devlink_linecard_rel_cleanup_cb(struct devlink *devlink,
+ u32 linecard_index, u32 rel_index)
+{
+ struct devlink_linecard *linecard;
+
+ linecard = devlink_linecard_get_by_index(devlink, linecard_index);
+ if (linecard && linecard->rel_index == rel_index)
+ linecard->rel_index = 0;
+}
+
/**
* devlink_linecard_nested_dl_set - Attach/detach nested devlink
* instance to linecard.
@@ -595,12 +617,14 @@ EXPORT_SYMBOL_GPL(devlink_linecard_deactivate);
* @linecard: devlink linecard
* @nested_devlink: devlink instance to attach or NULL to detach
*/
-void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
- struct devlink *nested_devlink)
+int devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
+ struct devlink *nested_devlink)
{
- mutex_lock(&linecard->state_lock);
- linecard->nested_devlink = nested_devlink;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- mutex_unlock(&linecard->state_lock);
+ return devlink_rel_nested_in_add(&linecard->rel_index,
+ linecard->devlink->index,
+ linecard->index,
+ devlink_linecard_rel_notify_cb,
+ devlink_linecard_rel_cleanup_cb,
+ nested_devlink);
}
EXPORT_SYMBOL_GPL(devlink_linecard_nested_dl_set);
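
A linecard no longer caches a nested devlink pointer; it records a rel index and receives notify/cleanup callbacks as the nested instance comes and goes. A hypothetical driver flow, sketched on the assumption that the nested instance is attached before it is registered (devlink_rel_nested_in_add() asserts exactly that):

static int example_linecard_provision(struct devlink_linecard *lc,
				      struct devlink *nested)
{
	int err;

	/* record the relationship while 'nested' is still unregistered */
	err = devlink_linecard_nested_dl_set(lc, nested);
	if (err)
		return err;

	/* a later devl_register(nested) fires the notify callback,
	 * which re-emits DEVLINK_CMD_LINECARD_NEW with the handle
	 */
	return 0;
}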
diff --git a/net/devlink/netlink.c b/net/devlink/netlink.c
index fc3e7c029a3b..499304d9de49 100644
--- a/net/devlink/netlink.c
+++ b/net/devlink/netlink.c
@@ -82,6 +82,32 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_REGION_DIRECT] = { .type = NLA_FLAG },
};
+int devlink_nl_put_nested_handle(struct sk_buff *msg, struct net *net,
+ struct devlink *devlink, int attrtype)
+{
+ struct nlattr *nested_attr;
+
+ nested_attr = nla_nest_start(msg, attrtype);
+ if (!nested_attr)
+ return -EMSGSIZE;
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (!net_eq(net, devlink_net(devlink))) {
+ int id = peernet2id_alloc(net, devlink_net(devlink),
+ GFP_KERNEL);
+
+ if (nla_put_s32(msg, DEVLINK_ATTR_NETNS_ID, id))
+			goto nla_put_failure;
+ }
+
+ nla_nest_end(msg, nested_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, nested_attr);
+ return -EMSGSIZE;
+}
+
int devlink_nl_msg_reply_and_new(struct sk_buff **msg, struct genl_info *info)
{
int err;
diff --git a/net/devlink/port.c b/net/devlink/port.c
index 4763b42885fb..4e9003242448 100644
--- a/net/devlink/port.c
+++ b/net/devlink/port.c
@@ -428,6 +428,13 @@ devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *por
if (err)
goto out;
err = devlink_port_fn_state_fill(port, msg, extack, &msg_updated);
+ if (err)
+ goto out;
+ err = devlink_rel_devlink_handle_put(msg, port->devlink,
+ port->rel_index,
+ DEVLINK_PORT_FN_ATTR_DEVLINK,
+ &msg_updated);
+
out:
if (err || !msg_updated)
nla_nest_cancel(msg, function_attr);
@@ -483,7 +490,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg,
goto nla_put_failure;
if (devlink_port->linecard &&
nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX,
- devlink_port->linecard->index))
+ devlink_linecard_index(devlink_port->linecard)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -1392,6 +1399,50 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 contro
}
EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
+static void devlink_port_rel_notify_cb(struct devlink *devlink, u32 port_index)
+{
+ struct devlink_port *devlink_port;
+
+ devlink_port = devlink_port_get_by_index(devlink, port_index);
+ if (!devlink_port)
+ return;
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+}
+
+static void devlink_port_rel_cleanup_cb(struct devlink *devlink, u32 port_index,
+ u32 rel_index)
+{
+ struct devlink_port *devlink_port;
+
+ devlink_port = devlink_port_get_by_index(devlink, port_index);
+ if (devlink_port && devlink_port->rel_index == rel_index)
+ devlink_port->rel_index = 0;
+}
+
+/**
+ * devl_port_fn_devlink_set - Attach peer devlink
+ * instance to port function.
+ * @devlink_port: devlink port
+ * @fn_devlink: devlink instance to attach
+ */
+int devl_port_fn_devlink_set(struct devlink_port *devlink_port,
+ struct devlink *fn_devlink)
+{
+ ASSERT_DEVLINK_PORT_REGISTERED(devlink_port);
+
+ if (WARN_ON(devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PCI_SF ||
+ devlink_port->attrs.pci_sf.external))
+ return -EINVAL;
+
+ return devlink_rel_nested_in_add(&devlink_port->rel_index,
+ devlink_port->devlink->index,
+ devlink_port->index,
+ devlink_port_rel_notify_cb,
+ devlink_port_rel_cleanup_cb,
+ fn_devlink);
+}
+EXPORT_SYMBOL_GPL(devl_port_fn_devlink_set);
+
/**
* devlink_port_linecard_set - Link port with a linecard
*
@@ -1420,7 +1471,7 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
if (devlink_port->linecard)
n = snprintf(name, len, "l%u",
- devlink_port->linecard->index);
+ devlink_linecard_index(devlink_port->linecard));
if (n < len)
n += snprintf(name + n, len - n, "p%u",
attrs->phys.port_number);
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 37ab238e8304..5f01bd4f9dec 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -2024,7 +2024,8 @@ void dsa_shared_port_link_unregister_of(struct dsa_port *dp)
dsa_shared_port_setup_phy_of(dp, false);
}
-int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
+int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr,
+ struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
int err;
@@ -2034,7 +2035,7 @@ int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
dp->hsr_dev = hsr;
- err = ds->ops->port_hsr_join(ds, dp->index, hsr);
+ err = ds->ops->port_hsr_join(ds, dp->index, hsr, extack);
if (err)
dp->hsr_dev = NULL;
diff --git a/net/dsa/port.h b/net/dsa/port.h
index dc812512fd0e..334879964e2c 100644
--- a/net/dsa/port.h
+++ b/net/dsa/port.h
@@ -103,7 +103,8 @@ int dsa_port_phylink_create(struct dsa_port *dp);
void dsa_port_phylink_destroy(struct dsa_port *dp);
int dsa_shared_port_link_register_of(struct dsa_port *dp);
void dsa_shared_port_link_unregister_of(struct dsa_port *dp);
-int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
+int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr,
+ struct netlink_ext_ack *extack);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 48db91b33390..4c3e502d7e16 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -457,6 +457,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ if (ds->ops->port_set_mac_address) {
+ err = ds->ops->port_set_mac_address(ds, dp->index,
+ addr->sa_data);
+ if (err)
+ return err;
+ }
+
/* If the port is down, the address isn't synced yet to hardware or
* to the DSA master, so there is nothing to change.
*/
@@ -2862,7 +2869,7 @@ static int dsa_slave_changeupper(struct net_device *dev,
}
} else if (is_hsr_master(info->upper_dev)) {
if (info->linking) {
- err = dsa_port_hsr_join(dp, info->upper_dev);
+ err = dsa_port_hsr_join(dp, info->upper_dev, extack);
if (err == -EOPNOTSUPP) {
NL_SET_ERR_MSG_WEAK_MOD(extack,
"Offloading not supported");
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index ea100bd25939..3632e47dea9e 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -293,6 +293,14 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
if (is_link_local_ether_addr(hdr->h_dest))
val |= KSZ9477_TAIL_TAG_OVERRIDE;
+ if (dev->features & NETIF_F_HW_HSR_DUP) {
+ struct net_device *hsr_dev = dp->hsr_dev;
+ struct dsa_port *other_dp;
+
+ dsa_hsr_foreach_port(other_dp, dp->ds, hsr_dev)
+ val |= BIT(other_dp->index);
+ }
+
*tag = cpu_to_be16(val);
return ksz_defer_xmit(dp, skb);
diff --git a/net/handshake/genl.c b/net/handshake/genl.c
index 233be5cbfec9..f55d14d7b726 100644
--- a/net/handshake/genl.c
+++ b/net/handshake/genl.c
@@ -18,7 +18,7 @@ static const struct nla_policy handshake_accept_nl_policy[HANDSHAKE_A_ACCEPT_HAN
/* HANDSHAKE_CMD_DONE - do */
static const struct nla_policy handshake_done_nl_policy[HANDSHAKE_A_DONE_REMOTE_AUTH + 1] = {
[HANDSHAKE_A_DONE_STATUS] = { .type = NLA_U32, },
- [HANDSHAKE_A_DONE_SOCKFD] = { .type = NLA_U32, },
+ [HANDSHAKE_A_DONE_SOCKFD] = { .type = NLA_S32, },
[HANDSHAKE_A_DONE_REMOTE_AUTH] = { .type = NLA_U32, },
};
diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c
index d0bc1dd8e65a..64a0046dd611 100644
--- a/net/handshake/netlink.c
+++ b/net/handshake/netlink.c
@@ -163,7 +163,7 @@ int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD))
return -EINVAL;
- fd = nla_get_u32(info->attrs[HANDSHAKE_A_DONE_SOCKFD]);
+ fd = nla_get_s32(info->attrs[HANDSHAKE_A_DONE_SOCKFD]);
sock = sockfd_lookup(fd, &err);
if (!sock)
diff --git a/net/handshake/tlshd.c b/net/handshake/tlshd.c
index bbfb4095ddd6..d697f68c598c 100644
--- a/net/handshake/tlshd.c
+++ b/net/handshake/tlshd.c
@@ -173,9 +173,9 @@ static int tls_handshake_put_certificate(struct sk_buff *msg,
if (!entry_attr)
return -EMSGSIZE;
- if (nla_put_u32(msg, HANDSHAKE_A_X509_CERT,
+ if (nla_put_s32(msg, HANDSHAKE_A_X509_CERT,
treq->th_certificate) ||
- nla_put_u32(msg, HANDSHAKE_A_X509_PRIVKEY,
+ nla_put_s32(msg, HANDSHAKE_A_X509_PRIVKEY,
treq->th_privkey)) {
nla_nest_cancel(msg, entry_attr);
return -EMSGSIZE;
@@ -214,7 +214,7 @@ static int tls_handshake_accept(struct handshake_req *req,
goto out_cancel;
ret = -EMSGSIZE;
- ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_SOCKFD, fd);
+ ret = nla_put_s32(msg, HANDSHAKE_A_ACCEPT_SOCKFD, fd);
if (ret < 0)
goto out_cancel;
ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_MESSAGE_TYPE, treq->th_type);
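
All three handshake hunks switch file-descriptor attributes from u32 to s32: descriptors are signed ints, and a u32 round trip would turn a -1 sentinel into 4294967295, which sockfd_lookup() would then chase as a nonexistent descriptor. A minimal sketch of the signed put:

static int put_sockfd_example(struct sk_buff *msg, int fd)
{
	/* s32 keeps negative sentinels such as -1 intact; a u32
	 * attribute would silently wrap them
	 */
	return nla_put_s32(msg, HANDSHAKE_A_DONE_SOCKFD, fd);
}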
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index cb5dbee9e018..2cc50cbfc2a3 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -39,11 +39,11 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
saddr = inet->inet_saddr;
if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
if (!oif || netif_index_is_l3_master(sock_net(sk), oif))
- oif = inet->mc_index;
+ oif = READ_ONCE(inet->mc_index);
if (!saddr)
- saddr = inet->mc_addr;
+ saddr = READ_ONCE(inet->mc_addr);
} else if (!oif) {
- oif = inet->uc_index;
+ oif = READ_ONCE(inet->uc_index);
}
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr, oif,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 418e5fb58fd3..76c3ea75b8dd 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2944,8 +2944,6 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
continue;
state->im = rcu_dereference(state->idev->mc_list);
}
- if (!state->im)
- break;
spin_lock_bh(&state->im->lock);
psf = state->im->sources;
}
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index e13a84433413..f01aee832aab 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -134,7 +134,7 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
* hence this needs to be included regardless of socket family.
*/
if (ext & (1 << (INET_DIAG_TOS - 1)))
- if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
+ if (nla_put_u8(skb, INET_DIAG_TOS, READ_ONCE(inet->tos)) < 0)
goto errout;
#if IS_ENABLED(CONFIG_IPV6)
@@ -165,7 +165,7 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
* For cgroup2 classid is always zero.
*/
if (!classid)
- classid = sk->sk_priority;
+ classid = READ_ONCE(sk->sk_priority);
if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
goto errout;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4ab877cf6d35..89e62ed08dad 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -544,7 +544,7 @@ EXPORT_SYMBOL(__ip_queue_xmit);
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
- return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
+ return __ip_queue_xmit(sk, skb, fl, READ_ONCE(inet_sk(sk)->tos));
}
EXPORT_SYMBOL(ip_queue_xmit);
@@ -1387,8 +1387,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
struct ip_options *opt = NULL;
struct rtable *rt = (struct rtable *)cork->dst;
struct iphdr *iph;
+ u8 pmtudisc, ttl;
__be16 df = 0;
- __u8 ttl;
skb = __skb_dequeue(queue);
if (!skb)
@@ -1418,8 +1418,9 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
/* DF bit is set when we want to see DF on outgoing frames.
* If ignore_df is set too, we still allow to fragment this frame
* locally. */
- if (inet->pmtudisc == IP_PMTUDISC_DO ||
- inet->pmtudisc == IP_PMTUDISC_PROBE ||
+ pmtudisc = READ_ONCE(inet->pmtudisc);
+ if (pmtudisc == IP_PMTUDISC_DO ||
+ pmtudisc == IP_PMTUDISC_PROBE ||
(skb->len <= dst_mtu(&rt->dst) &&
ip_dont_fragment(sk, &rt->dst)))
df = htons(IP_DF);
@@ -1430,14 +1431,14 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
if (cork->ttl != 0)
ttl = cork->ttl;
else if (rt->rt_type == RTN_MULTICAST)
- ttl = inet->mc_ttl;
+ ttl = READ_ONCE(inet->mc_ttl);
else
ttl = ip_select_ttl(inet, &rt->dst);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
- iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
+ iph->tos = (cork->tos != -1) ? cork->tos : READ_ONCE(inet->tos);
iph->frag_off = df;
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
@@ -1449,7 +1450,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
ip_options_build(skb, opt, cork->addr, rt);
}
- skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
+ skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority);
skb->mark = cork->mark;
skb->tstamp = cork->transmit_time;
/*
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index cce9cb25f3b3..0b74ac49d6a6 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -585,25 +585,20 @@ out:
return err;
}
-void __ip_sock_set_tos(struct sock *sk, int val)
+void ip_sock_set_tos(struct sock *sk, int val)
{
+ u8 old_tos = READ_ONCE(inet_sk(sk)->tos);
+
if (sk->sk_type == SOCK_STREAM) {
val &= ~INET_ECN_MASK;
- val |= inet_sk(sk)->tos & INET_ECN_MASK;
+ val |= old_tos & INET_ECN_MASK;
}
- if (inet_sk(sk)->tos != val) {
- inet_sk(sk)->tos = val;
+ if (old_tos != val) {
+ WRITE_ONCE(inet_sk(sk)->tos, val);
WRITE_ONCE(sk->sk_priority, rt_tos2priority(val));
sk_dst_reset(sk);
}
}
-
-void ip_sock_set_tos(struct sock *sk, int val)
-{
- lock_sock(sk);
- __ip_sock_set_tos(sk, val);
- release_sock(sk);
-}
EXPORT_SYMBOL(ip_sock_set_tos);
void ip_sock_set_freebind(struct sock *sk)
@@ -622,9 +617,7 @@ int ip_sock_set_mtu_discover(struct sock *sk, int val)
{
if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
return -EINVAL;
- lock_sock(sk);
- inet_sk(sk)->pmtudisc = val;
- release_sock(sk);
+ WRITE_ONCE(inet_sk(sk)->pmtudisc, val);
return 0;
}
EXPORT_SYMBOL(ip_sock_set_mtu_discover);
@@ -1039,6 +1032,22 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
WRITE_ONCE(inet->min_ttl, val);
return 0;
+ case IP_MULTICAST_TTL:
+ if (sk->sk_type == SOCK_STREAM)
+ return -EINVAL;
+ if (optlen < 1)
+ return -EINVAL;
+ if (val == -1)
+ val = 1;
+ if (val < 0 || val > 255)
+ return -EINVAL;
+ WRITE_ONCE(inet->mc_ttl, val);
+ return 0;
+ case IP_MTU_DISCOVER:
+ return ip_sock_set_mtu_discover(sk, val);
+ case IP_TOS: /* This sets both TOS and Precedence */
+ ip_sock_set_tos(sk, val);
+ return 0;
}
err = 0;
@@ -1093,25 +1102,6 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
}
}
break;
- case IP_TOS: /* This sets both TOS and Precedence */
- __ip_sock_set_tos(sk, val);
- break;
- case IP_MTU_DISCOVER:
- if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
- goto e_inval;
- inet->pmtudisc = val;
- break;
- case IP_MULTICAST_TTL:
- if (sk->sk_type == SOCK_STREAM)
- goto e_inval;
- if (optlen < 1)
- goto e_inval;
- if (val == -1)
- val = 1;
- if (val < 0 || val > 255)
- goto e_inval;
- inet->mc_ttl = val;
- break;
case IP_UNICAST_IF:
{
struct net_device *dev = NULL;
@@ -1123,7 +1113,7 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
ifindex = (__force int)ntohl((__force __be32)val);
if (ifindex == 0) {
- inet->uc_index = 0;
+ WRITE_ONCE(inet->uc_index, 0);
err = 0;
break;
}
@@ -1140,7 +1130,7 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
if (sk->sk_bound_dev_if && midx != sk->sk_bound_dev_if)
break;
- inet->uc_index = ifindex;
+ WRITE_ONCE(inet->uc_index, ifindex);
err = 0;
break;
}
@@ -1178,8 +1168,8 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
if (!mreq.imr_ifindex) {
if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
- inet->mc_index = 0;
- inet->mc_addr = 0;
+ WRITE_ONCE(inet->mc_index, 0);
+ WRITE_ONCE(inet->mc_addr, 0);
err = 0;
break;
}
@@ -1204,8 +1194,8 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
midx != sk->sk_bound_dev_if)
break;
- inet->mc_index = mreq.imr_ifindex;
- inet->mc_addr = mreq.imr_address.s_addr;
+ WRITE_ONCE(inet->mc_index, mreq.imr_ifindex);
+ WRITE_ONCE(inet->mc_addr, mreq.imr_address.s_addr);
err = 0;
break;
}
@@ -1592,27 +1582,29 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_MINTTL:
val = READ_ONCE(inet->min_ttl);
goto copyval;
- }
-
- if (needs_rtnl)
- rtnl_lock();
- sockopt_lock_sock(sk);
-
- switch (optname) {
+ case IP_MULTICAST_TTL:
+ val = READ_ONCE(inet->mc_ttl);
+ goto copyval;
+ case IP_MTU_DISCOVER:
+ val = READ_ONCE(inet->pmtudisc);
+ goto copyval;
+ case IP_TOS:
+ val = READ_ONCE(inet->tos);
+ goto copyval;
case IP_OPTIONS:
{
unsigned char optbuf[sizeof(struct ip_options)+40];
struct ip_options *opt = (struct ip_options *)optbuf;
struct ip_options_rcu *inet_opt;
- inet_opt = rcu_dereference_protected(inet->inet_opt,
- lockdep_sock_is_held(sk));
+ rcu_read_lock();
+ inet_opt = rcu_dereference(inet->inet_opt);
opt->optlen = 0;
if (inet_opt)
memcpy(optbuf, &inet_opt->opt,
sizeof(struct ip_options) +
inet_opt->opt.optlen);
- sockopt_release_sock(sk);
+ rcu_read_unlock();
if (opt->optlen == 0) {
len = 0;
@@ -1628,12 +1620,6 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
return -EFAULT;
return 0;
}
- case IP_TOS:
- val = inet->tos;
- break;
- case IP_MTU_DISCOVER:
- val = inet->pmtudisc;
- break;
case IP_MTU:
{
struct dst_entry *dst;
@@ -1643,24 +1629,55 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
val = dst_mtu(dst);
dst_release(dst);
}
- if (!val) {
- sockopt_release_sock(sk);
+ if (!val)
return -ENOTCONN;
+ goto copyval;
+ }
+ case IP_PKTOPTIONS:
+ {
+ struct msghdr msg;
+
+ if (sk->sk_type != SOCK_STREAM)
+ return -ENOPROTOOPT;
+
+ if (optval.is_kernel) {
+ msg.msg_control_is_user = false;
+ msg.msg_control = optval.kernel;
+ } else {
+ msg.msg_control_is_user = true;
+ msg.msg_control_user = optval.user;
}
- break;
+ msg.msg_controllen = len;
+ msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;
+
+ if (inet_test_bit(PKTINFO, sk)) {
+ struct in_pktinfo info;
+
+ info.ipi_addr.s_addr = READ_ONCE(inet->inet_rcv_saddr);
+ info.ipi_spec_dst.s_addr = READ_ONCE(inet->inet_rcv_saddr);
+ info.ipi_ifindex = READ_ONCE(inet->mc_index);
+ put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
+ }
+ if (inet_test_bit(TTL, sk)) {
+ int hlim = READ_ONCE(inet->mc_ttl);
+
+ put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
+ }
+ if (inet_test_bit(TOS, sk)) {
+ int tos = READ_ONCE(inet->rcv_tos);
+ put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
+ }
+ len -= msg.msg_controllen;
+ return copy_to_sockptr(optlen, &len, sizeof(int));
}
- case IP_MULTICAST_TTL:
- val = inet->mc_ttl;
- break;
case IP_UNICAST_IF:
- val = (__force int)htonl((__u32) inet->uc_index);
- break;
+ val = (__force int)htonl((__u32) READ_ONCE(inet->uc_index));
+ goto copyval;
case IP_MULTICAST_IF:
{
struct in_addr addr;
len = min_t(unsigned int, len, sizeof(struct in_addr));
- addr.s_addr = inet->mc_addr;
- sockopt_release_sock(sk);
+ addr.s_addr = READ_ONCE(inet->mc_addr);
if (copy_to_sockptr(optlen, &len, sizeof(int)))
return -EFAULT;
@@ -1668,6 +1685,13 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
return -EFAULT;
return 0;
}
+ }
+
+ if (needs_rtnl)
+ rtnl_lock();
+ sockopt_lock_sock(sk);
+
+ switch (optname) {
case IP_MSFILTER:
{
struct ip_msfilter msf;
@@ -1690,44 +1714,6 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
else
err = ip_get_mcast_msfilter(sk, optval, optlen, len);
goto out;
- case IP_PKTOPTIONS:
- {
- struct msghdr msg;
-
- sockopt_release_sock(sk);
-
- if (sk->sk_type != SOCK_STREAM)
- return -ENOPROTOOPT;
-
- if (optval.is_kernel) {
- msg.msg_control_is_user = false;
- msg.msg_control = optval.kernel;
- } else {
- msg.msg_control_is_user = true;
- msg.msg_control_user = optval.user;
- }
- msg.msg_controllen = len;
- msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;
-
- if (inet_test_bit(PKTINFO, sk)) {
- struct in_pktinfo info;
-
- info.ipi_addr.s_addr = inet->inet_rcv_saddr;
- info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
- info.ipi_ifindex = inet->mc_index;
- put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
- }
- if (inet_test_bit(TTL, sk)) {
- int hlim = inet->mc_ttl;
- put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
- }
- if (inet_test_bit(TOS, sk)) {
- int tos = inet->rcv_tos;
- put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
- }
- len -= msg.msg_controllen;
- return copy_to_sockptr(optlen, &len, sizeof(int));
- }
case IP_LOCAL_PORT_RANGE:
val = inet->local_port_range.hi << 16 | inet->local_port_range.lo;
break;
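
ip_sock_set_tos() keeps the ECN bits of a SOCK_STREAM socket under kernel control: user-supplied TOS values have their two low bits masked off for TCP. A user-space sketch that sets only the DSCP part:

#include <netinet/in.h>
#include <netinet/ip.h>
#include <sys/socket.h>

static int set_dscp_example(int fd, int dscp)
{
	int tos = dscp << 2;	/* DSCP lives in TOS bits 7..2 */

	/* for TCP sockets the kernel preserves its own ECN bits */
	return setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
}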
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 75e0aee35eb7..2c61f444e1c7 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -551,7 +551,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
case ICMP_DEST_UNREACH:
if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
ipv4_sk_update_pmtu(skb, sk, info);
- if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
+ if (READ_ONCE(inet_sock->pmtudisc) != IP_PMTUDISC_DONT) {
err = EMSGSIZE;
harderr = 1;
break;
@@ -581,7 +581,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
* 4.1.3.3.
*/
if ((family == AF_INET && !inet_test_bit(RECVERR, sk)) ||
- (family == AF_INET6 && !inet6_sk(sk)->recverr)) {
+ (family == AF_INET6 && !inet6_test_bit(RECVERR6, sk))) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
} else {
@@ -773,11 +773,11 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
- ipc.oif = inet->mc_index;
+ ipc.oif = READ_ONCE(inet->mc_index);
if (!saddr)
- saddr = inet->mc_addr;
+ saddr = READ_ONCE(inet->mc_addr);
} else if (!ipc.oif)
- ipc.oif = inet->uc_index;
+ ipc.oif = READ_ONCE(inet->uc_index);
flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos, scope,
sk->sk_protocol, inet_sk_flowi_flags(sk), faddr,
@@ -899,7 +899,6 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *ip6 = ipv6_hdr(skb);
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
@@ -908,7 +907,7 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
sin6->sin6_port = 0;
sin6->sin6_addr = ip6->saddr;
sin6->sin6_flowinfo = 0;
- if (np->sndflow)
+ if (inet6_test_bit(SNDFLOW, sk))
sin6->sin6_flowinfo = ip6_flowinfo(ip6);
sin6->sin6_scope_id =
ipv6_iface_scope_id(&sin6->sin6_addr,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 4b5db5d1edc2..27da9d7294c0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -239,7 +239,7 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
if (code > NR_ICMP_UNREACH)
break;
if (code == ICMP_FRAG_NEEDED) {
- harderr = inet->pmtudisc != IP_PMTUDISC_DONT;
+ harderr = READ_ONCE(inet->pmtudisc) != IP_PMTUDISC_DONT;
err = EMSGSIZE;
} else {
err = icmp_err_convert[code].errno;
@@ -482,7 +482,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
int free = 0;
__be32 daddr;
__be32 saddr;
- int err;
+ int uc_index, err;
struct ip_options_data opt_copy;
struct raw_frag_vec rfv;
int hdrincl;
@@ -576,24 +576,25 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
tos = get_rttos(&ipc, inet);
scope = ip_sendmsg_scope(inet, &ipc, msg);
+ uc_index = READ_ONCE(inet->uc_index);
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
- ipc.oif = inet->mc_index;
+ ipc.oif = READ_ONCE(inet->mc_index);
if (!saddr)
- saddr = inet->mc_addr;
+ saddr = READ_ONCE(inet->mc_addr);
} else if (!ipc.oif) {
- ipc.oif = inet->uc_index;
- } else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
+ ipc.oif = uc_index;
+ } else if (ipv4_is_lbcast(daddr) && uc_index) {
/* oif is set, packet is to local broadcast
* and uc_index is set. oif is most likely set
* by sk_bound_dev_if. If uc_index != oif check if the
* oif is an L3 master and uc_index is an L3 slave.
* If so, we want to allow the send using the uc_index.
*/
- if (ipc.oif != inet->uc_index &&
+ if (ipc.oif != uc_index &&
ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
- inet->uc_index)) {
- ipc.oif = inet->uc_index;
+ uc_index)) {
+ ipc.oif = uc_index;
}
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b214b5a2e045..e2bf4602b559 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1632,7 +1632,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
{
struct rtable *rt;
- rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
+ rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
(noxfrm ? DST_NOXFRM : 0));
if (rt) {
@@ -1660,7 +1660,7 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
struct rtable *new_rt;
- new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
+ new_rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
rt->dst.flags);
if (new_rt) {
@@ -2834,7 +2834,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
struct rtable *ort = (struct rtable *) dst_orig;
struct rtable *rt;
- rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
+ rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
if (rt) {
struct dst_entry *new = &rt->dst;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 6ac890b4073f..e7f024d93572 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -1367,6 +1367,15 @@ static struct ctl_table ipv4_net_table[] = {
.extra1 = SYSCTL_ZERO,
},
{
+ .procname = "tcp_backlog_ack_defer",
+ .data = &init_net.ipv4.sysctl_tcp_backlog_ack_defer,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
.procname = "tcp_reflect_tos",
.data = &init_net.ipv4.sysctl_tcp_reflect_tos,
.maxlen = sizeof(u8),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3f66cdeef7de..9a8b134d8ada 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3756,7 +3756,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_options |= TCPI_OPT_SYN_DATA;
info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
- info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
+ info->tcpi_ato = jiffies_to_usecs(min(icsk->icsk_ack.ato,
+ tcp_delack_max(sk)));
info->tcpi_snd_mss = tp->mss_cache;
info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
@@ -3812,6 +3813,15 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_rcv_wnd = tp->rcv_wnd;
info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash;
info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
+
+ info->tcpi_total_rto = tp->total_rto;
+ info->tcpi_total_rto_recoveries = tp->total_rto_recoveries;
+ info->tcpi_total_rto_time = tp->total_rto_time;
+ if (tp->rto_stamp) {
+ info->tcpi_total_rto_time += tcp_time_stamp_raw() -
+ tp->rto_stamp;
+ }
+
unlock_sock_fast(sk, slow);
}
EXPORT_SYMBOL_GPL(tcp_get_info);
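
The new tcpi_total_rto* counters are visible to user space through TCP_INFO. A user-space sketch that reads them, assuming uapi headers recent enough to carry the fields:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void print_rto_stats(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	memset(&ti, 0, sizeof(ti));
	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len))
		return;

	printf("RTO timeouts: %u, recoveries: %u, time: %u ms\n",
	       ti.tcpi_total_rto, ti.tcpi_total_rto_recoveries,
	       ti.tcpi_total_rto_time);
}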
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 146792cd26fe..22358032dd48 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -258,7 +258,7 @@ static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
u64 rate = bw;
rate = bbr_rate_bytes_per_sec(sk, rate, gain);
- rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+ rate = min_t(u64, rate, READ_ONCE(sk->sk_max_pacing_rate));
return rate;
}
@@ -278,7 +278,8 @@ static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
}
bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT;
do_div(bw, rtt_us);
- sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
+ WRITE_ONCE(sk->sk_pacing_rate,
+ bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain));
}
/* Pace using current bw estimate and a gain factor. */
@@ -290,14 +291,14 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
bbr_init_pacing_rate_from_rtt(sk);
- if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
- sk->sk_pacing_rate = rate;
+ if (bbr_full_bw_reached(sk) || rate > READ_ONCE(sk->sk_pacing_rate))
+ WRITE_ONCE(sk->sk_pacing_rate, rate);
}
/* override sysctl_tcp_min_tso_segs */
__bpf_kfunc static u32 bbr_min_tso_segs(struct sock *sk)
{
- return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
+ return READ_ONCE(sk->sk_pacing_rate) < (bbr_min_tso_rate >> 3) ? 1 : 2;
}
static u32 bbr_tso_segs_goal(struct sock *sk)
@@ -309,7 +310,7 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
* driver provided sk_gso_max_size.
*/
bytes = min_t(unsigned long,
- sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
+ READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift),
GSO_LEGACY_MAX_SIZE - 1 - MAX_TCP_HEADER);
segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8afb0950a697..4b8f2e74d71d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -940,8 +940,8 @@ static void tcp_update_pacing_rate(struct sock *sk)
* without any lock. We want to make sure compiler wont store
* intermediate values in this location.
*/
- WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,
- sk->sk_max_pacing_rate));
+ WRITE_ONCE(sk->sk_pacing_rate,
+ min_t(u64, rate, READ_ONCE(sk->sk_max_pacing_rate)));
}
/* Calculate rto without backoff. This is the second half of Van Jacobson's
@@ -2101,6 +2101,10 @@ void tcp_clear_retrans(struct tcp_sock *tp)
tp->undo_marker = 0;
tp->undo_retrans = -1;
tp->sacked_out = 0;
+ tp->rto_stamp = 0;
+ tp->total_rto = 0;
+ tp->total_rto_recoveries = 0;
+ tp->total_rto_time = 0;
}
static inline void tcp_init_undo(struct tcp_sock *tp)
@@ -2838,6 +2842,14 @@ void tcp_enter_recovery(struct sock *sk, bool ece_ack)
tcp_set_ca_state(sk, TCP_CA_Recovery);
}
+static void tcp_update_rto_time(struct tcp_sock *tp)
+{
+ if (tp->rto_stamp) {
+ tp->total_rto_time += tcp_time_stamp(tp) - tp->rto_stamp;
+ tp->rto_stamp = 0;
+ }
+}
+
/* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
* recovered or spurious. Otherwise retransmits more on partial ACKs.
*/
@@ -3042,6 +3054,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
break;
case TCP_CA_Loss:
tcp_process_loss(sk, flag, num_dupack, rexmit);
+ if (icsk->icsk_ca_state != TCP_CA_Loss)
+ tcp_update_rto_time(tp);
tcp_identify_packet_loss(sk, ack_flag);
if (!(icsk->icsk_ca_state == TCP_CA_Open ||
(*ack_flag & FLAG_LOST_RETRANS)))
@@ -5566,6 +5580,14 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
tcp_in_quickack_mode(sk) ||
/* Protocol state mandates a one-time immediate ACK */
inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) {
+ /* If we are running from __release_sock() in user context,
+ * defer the ack until tcp_release_cb().
+ */
+ if (sock_owned_by_user_nocheck(sk) &&
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_backlog_ack_defer)) {
+ set_bit(TCP_ACK_DEFERRED, &sk->sk_tsq_flags);
+ return;
+ }
send_now:
tcp_send_ack(sk);
return;
@@ -6449,22 +6471,24 @@ reset_and_undo:
static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
{
+ struct tcp_sock *tp = tcp_sk(sk);
struct request_sock *req;
/* If we are still handling the SYNACK RTO, see if timestamp ECR allows
* undo. If peer SACKs triggered fast recovery, we can't undo here.
*/
- if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
- tcp_try_undo_loss(sk, false);
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
+ tcp_try_undo_recovery(sk);
/* Reset rtx states to prevent spurious retransmits_timed_out() */
- tcp_sk(sk)->retrans_stamp = 0;
+ tcp_update_rto_time(tp);
+ tp->retrans_stamp = 0;
inet_csk(sk)->icsk_retransmits = 0;
/* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
* we no longer need req so release it.
*/
- req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+ req = rcu_dereference_protected(tp->fastopen_rsk,
lockdep_sock_is_held(sk));
reqsk_fastopen_remove(sk, req, false);
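
total_rto_time is classic stamp-and-accumulate accounting: rto_stamp marks the start of an RTO episode, the elapsed delta is folded in when recovery ends, and readers add any still-open interval on the fly. A generic sketch of the pattern, with stand-in names for the tcp_sock fields:

struct rto_acct {
	u32 stamp;	/* 0 while no RTO episode is in progress */
	u32 total_ms;
};

static void rto_acct_start(struct rto_acct *a, u32 now)
{
	if (!a->stamp)
		a->stamp = now;		/* first timeout of the episode */
}

static void rto_acct_stop(struct rto_acct *a, u32 now)
{
	if (a->stamp) {
		a->total_ms += now - a->stamp;
		a->stamp = 0;
	}
}

static u32 rto_acct_read(const struct rto_acct *a, u32 now)
{
	/* include the still-open interval, as tcp_get_info() does */
	return a->total_ms + (a->stamp ? now - a->stamp : 0);
}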
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 27140e5cdc06..a441740616d7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -828,7 +828,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
inet_twsk(sk)->tw_mark : sk->sk_mark;
ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
- inet_twsk(sk)->tw_priority : sk->sk_priority;
+ inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
transmit_time = tcp_transmit_time(sk);
xfrm_sk_clone_policy(ctl_sk, sk);
txhash = (sk->sk_state == TCP_TIME_WAIT) ?
@@ -1024,10 +1024,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
if (skb) {
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
- tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
- (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
- (inet_sk(sk)->tos & INET_ECN_MASK) :
- inet_sk(sk)->tos;
+ tos = READ_ONCE(inet_sk(sk)->tos);
+
+ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
+ tos = (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
+ (tos & INET_ECN_MASK);
if (!INET_ECN_is_capable(tos) &&
tcp_bpf_ca_needs_ecn((struct sock *)req))
@@ -3263,6 +3264,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
net->ipv4.sysctl_tcp_comp_sack_nr = 44;
+ net->ipv4.sysctl_tcp_backlog_ack_defer = 1;
net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
atomic_set(&net->ipv4.tfo_active_disable_times, 0);
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index c196759f1d3b..c2a925538542 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -470,11 +470,15 @@ void tcp_init_metrics(struct sock *sk)
u32 val, crtt = 0; /* cached RTT scaled by 8 */
sk_dst_confirm(sk);
+ /* ssthresh may have been reduced unnecessarily during
+ * 3WHS. Restore it to its initial default.
+ */
+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
if (!dst)
goto reset;
rcu_read_lock();
- tm = tcp_get_metrics(sk, dst, true);
+ tm = tcp_get_metrics(sk, dst, false);
if (!tm) {
rcu_read_unlock();
goto reset;
@@ -489,11 +493,6 @@ void tcp_init_metrics(struct sock *sk)
tp->snd_ssthresh = val;
if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
tp->snd_ssthresh = tp->snd_cwnd_clamp;
- } else {
- /* ssthresh may have been reduced unnecessarily during.
- * 3WHS. Restore it back to its initial default.
- */
- tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
}
val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
if (val && tp->reordering != val)
@@ -899,22 +898,25 @@ static void tcp_metrics_flush_all(struct net *net)
unsigned int row;
for (row = 0; row < max_rows; row++, hb++) {
- struct tcp_metrics_block __rcu **pp;
+ struct tcp_metrics_block __rcu **pp = &hb->chain;
bool match;
+ if (!rcu_access_pointer(*pp))
+ continue;
+
spin_lock_bh(&tcp_metrics_lock);
- pp = &hb->chain;
for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
match = net ? net_eq(tm_net(tm), net) :
!refcount_read(&tm_net(tm)->ns.count);
if (match) {
- *pp = tm->tcpm_next;
+ rcu_assign_pointer(*pp, tm->tcpm_next);
kfree_rcu(tm, rcu_head);
} else {
pp = &tm->tcpm_next;
}
}
spin_unlock_bh(&tcp_metrics_lock);
+ cond_resched();
}
}
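
tcp_metrics_flush_all() walks each chain with a pointer-to-pointer cursor so matching entries can be unlinked without tracking a previous node; the unlink is now published with rcu_assign_pointer() before kfree_rcu(), empty chains are skipped without taking the lock, and cond_resched() yields between rows. A plain (non-RCU) userspace sketch of the unlink idiom, with hypothetical types:

    #include <stdlib.h>

    struct node {
            int key;
            struct node *next;
    };

    /* Remove every node matching @key without a "prev" pointer:
     * pp always addresses the link that would have to be patched.
     */
    static void remove_matching(struct node **pp, int key)
    {
            struct node *n;

            while ((n = *pp) != NULL) {
                    if (n->key == key) {
                            *pp = n->next;  /* unlink; kernel: rcu_assign_pointer() */
                            free(n);        /* kernel defers this via kfree_rcu() */
                    } else {
                            pp = &n->next;  /* advance: pp now holds &prev->next */
                    }
            }
    }

    int main(void)
    {
            struct node *head = NULL, **pp = &head;
            int keys[] = { 1, 2, 1 };

            for (unsigned int i = 0; i < 3; i++) {
                    struct node *n = calloc(1, sizeof(*n));

                    n->key = keys[i];
                    *pp = n;
                    pp = &n->next;
            }
            remove_matching(&head, 1);      /* leaves only key == 2 */
            return head->key != 2;
    }
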
@@ -949,7 +951,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
if (addr_same(&tm->tcpm_daddr, &daddr) &&
(!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
net_eq(tm_net(tm), net)) {
- *pp = tm->tcpm_next;
+ rcu_assign_pointer(*pp, tm->tcpm_next);
kfree_rcu(tm, rcu_head);
found = true;
} else {
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b98d476f1594..3f87611077ef 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -292,7 +292,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
tw->tw_mark = sk->sk_mark;
- tw->tw_priority = sk->sk_priority;
+ tw->tw_priority = READ_ONCE(sk->sk_priority);
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
tcptw->tw_rcv_nxt = tp->rcv_nxt;
tcptw->tw_snd_nxt = tp->snd_nxt;
@@ -565,6 +565,10 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->undo_marker = treq->snt_isn;
newtp->retrans_stamp = div_u64(treq->snt_synack,
USEC_PER_SEC / TCP_TS_HZ);
+ newtp->total_rto = req->num_timeout;
+ newtp->total_rto_recoveries = 1;
+ newtp->total_rto_time = tcp_time_stamp_raw() -
+ newtp->retrans_stamp;
}
newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index aa0fc8c766e5..f207712eece1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1076,7 +1076,8 @@ static void tcp_tasklet_func(struct tasklet_struct *t)
#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED | \
TCPF_WRITE_TIMER_DEFERRED | \
TCPF_DELACK_TIMER_DEFERRED | \
- TCPF_MTU_REDUCED_DEFERRED)
+ TCPF_MTU_REDUCED_DEFERRED | \
+ TCPF_ACK_DEFERRED)
/**
* tcp_release_cb - tcp release_sock() callback
* @sk: socket
@@ -1100,16 +1101,6 @@ void tcp_release_cb(struct sock *sk)
tcp_tsq_write(sk);
__sock_put(sk);
}
- /* Here begins the tricky part :
- * We are called from release_sock() with :
- * 1) BH disabled
- * 2) sk_lock.slock spinlock held
- * 3) socket owned by us (sk->sk_lock.owned == 1)
- *
- * But following code is meant to be called from BH handlers,
- * so we should keep BH disabled, but early release socket ownership
- */
- sock_release_ownership(sk);
if (flags & TCPF_WRITE_TIMER_DEFERRED) {
tcp_write_timer_handler(sk);
@@ -1123,6 +1114,8 @@ void tcp_release_cb(struct sock *sk)
inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
__sock_put(sk);
}
+ if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk))
+ tcp_send_ack(sk);
}
EXPORT_SYMBOL(tcp_release_cb);
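
__tcp_ack_snd_check() now records a wanted ACK as TCP_ACK_DEFERRED while the socket is owned by user context (guarded by the new net.ipv4.tcp_backlog_ack_defer sysctl, defaulted to 1 further down), and tcp_release_cb() drains the flag and sends a single ACK. A rough userspace sketch of the set-a-flag-then-drain pattern, using C11 atomics and illustrative names:

    #include <stdatomic.h>
    #include <stdio.h>

    #define FLAG_ACK_DEFERRED (1u << 0)

    static atomic_uint deferred_flags;

    /* Fast path: owner is busy, just record that an ACK is wanted. */
    static void request_ack(void)
    {
            atomic_fetch_or(&deferred_flags, FLAG_ACK_DEFERRED);
    }

    /* Release path: atomically take all pending flags, then act once. */
    static void release_cb(void)
    {
            unsigned int flags = atomic_exchange(&deferred_flags, 0);

            if (flags & FLAG_ACK_DEFERRED)
                    printf("send one coalesced ACK\n");
    }

    int main(void)
    {
            request_ack();
            request_ack();  /* repeated requests coalesce into one flag */
            release_cb();   /* emits a single ACK */
            return 0;
    }
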
@@ -1207,7 +1200,7 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk);
if (sk->sk_pacing_status != SK_PACING_NONE) {
- unsigned long rate = sk->sk_pacing_rate;
+ unsigned long rate = READ_ONCE(sk->sk_pacing_rate);
/* Original sch_fq does not pace first 10 MSS
* Note that tp->data_segs_out overflows after 2^32 packets,
@@ -1331,7 +1324,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
- skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
+ skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
/* Build TCP header and checksum it. */
th = (struct tcphdr *)skb->data;
@@ -1979,7 +1972,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
unsigned long bytes;
u32 r;
- bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);
+ bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift);
r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
@@ -2559,7 +2552,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
limit = max_t(unsigned long,
2 * skb->truesize,
- sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
+ READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift));
if (sk->sk_pacing_status == SK_PACING_NONE)
limit = min_t(unsigned long, limit,
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
@@ -2567,7 +2560,8 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
tcp_sk(sk)->tcp_tx_delay) {
- u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
+ u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) *
+ tcp_sk(sk)->tcp_tx_delay;
/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
* approximate our needs assuming an ~100% skb->truesize overhead.
@@ -3983,6 +3977,20 @@ int tcp_connect(struct sock *sk)
}
EXPORT_SYMBOL(tcp_connect);
+u32 tcp_delack_max(const struct sock *sk)
+{
+ const struct dst_entry *dst = __sk_dst_get(sk);
+ u32 delack_max = inet_csk(sk)->icsk_delack_max;
+
+ if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) {
+ u32 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
+ u32 delack_from_rto_min = max_t(int, 1, rto_min - 1);
+
+ delack_max = min_t(u32, delack_max, delack_from_rto_min);
+ }
+ return delack_max;
+}
+
/* Send out a delayed ack, the caller does the policy checking
* to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
* for details.
@@ -4018,7 +4026,7 @@ void tcp_send_delayed_ack(struct sock *sk)
ato = min(ato, max_ato);
}
- ato = min_t(u32, ato, inet_csk(sk)->icsk_delack_max);
+ ato = min_t(u32, ato, tcp_delack_max(sk));
/* Stay within the limit we were given */
timeout = jiffies + ato;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 984ab4a0421e..3f61c6a70a1f 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -394,7 +394,7 @@ static void tcp_probe_timer(struct sock *sk)
if (user_timeout &&
(s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
msecs_to_jiffies(user_timeout))
- goto abort;
+ goto abort;
}
max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
if (sock_flag(sk, SOCK_DEAD)) {
@@ -415,6 +415,19 @@ abort: tcp_write_err(sk);
}
}
+static void tcp_update_rto_stats(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!icsk->icsk_retransmits) {
+ tp->total_rto_recoveries++;
+ tp->rto_stamp = tcp_time_stamp(tp);
+ }
+ icsk->icsk_retransmits++;
+ tp->total_rto++;
+}
+
/*
* Timer for Fast Open socket to retransmit SYNACK. Note that the
* sk here is the child socket, not the parent (listener) socket.
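
tcp_update_rto_stats() feeds the new tp->total_rto* counters (cleared in tcp_clear_retrans() and seeded for passively opened sockets in tcp_create_openreq_child() above), counting RTO retransmissions, RTO recovery episodes, and time spent in them. Assuming the counters are exported alongside the existing tcp_info fields, as the naming suggests (the uapi hunk is not part of this excerpt), a reader sketch using the long-established fields:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Dump RTO-related TCP_INFO fields for a TCP socket. The total_rto*
     * counters added above would surface as new struct tcp_info members
     * in a matching uapi header; the fields used here are long-standing.
     */
    static int dump_tcp_info(int fd)
    {
            struct tcp_info ti;
            socklen_t len = sizeof(ti);

            memset(&ti, 0, sizeof(ti));
            if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0)
                    return -1;
            printf("retransmits=%u rto=%uus\n", ti.tcpi_retransmits, ti.tcpi_rto);
            return 0;
    }

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0)
                    return 1;
            dump_tcp_info(fd);
            close(fd);
            return 0;
    }
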
@@ -447,7 +460,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
*/
inet_rtx_syn_ack(sk, req);
req->num_timeout++;
- icsk->icsk_retransmits++;
+ tcp_update_rto_stats(sk);
if (!tp->retrans_stamp)
tp->retrans_stamp = tcp_time_stamp(tp);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
@@ -575,7 +588,7 @@ void tcp_retransmit_timer(struct sock *sk)
tcp_enter_loss(sk);
- icsk->icsk_retransmits++;
+ tcp_update_rto_stats(sk);
if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
/* Retransmission failed because of local congestion,
* Let senders fight for local resources conservatively.
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f39b9c844580..7f7724beca33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -714,7 +714,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
iph->saddr, uh->source, skb->dev->ifindex,
inet_sdif(skb), udptable, NULL);
- if (!sk || udp_sk(sk)->encap_type) {
+ if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
/* No socket for error: try tunnels before discarding */
if (static_branch_unlikely(&udp_encap_needed_key)) {
sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
@@ -750,7 +750,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
case ICMP_DEST_UNREACH:
if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
ipv4_sk_update_pmtu(skb, sk, info);
- if (inet->pmtudisc != IP_PMTUDISC_DONT) {
+ if (READ_ONCE(inet->pmtudisc) != IP_PMTUDISC_DONT) {
err = EMSGSIZE;
harderr = 1;
break;
@@ -1051,10 +1051,11 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
u8 tos, scope;
__be16 dport;
int err, is_udplite = IS_UDPLITE(sk);
- int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
+ int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
struct sk_buff *skb;
struct ip_options_data opt_copy;
+ int uc_index;
if (len > 0xFFFF)
return -EMSGSIZE;
@@ -1173,25 +1174,26 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (scope == RT_SCOPE_LINK)
connected = 0;
+ uc_index = READ_ONCE(inet->uc_index);
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
- ipc.oif = inet->mc_index;
+ ipc.oif = READ_ONCE(inet->mc_index);
if (!saddr)
- saddr = inet->mc_addr;
+ saddr = READ_ONCE(inet->mc_addr);
connected = 0;
} else if (!ipc.oif) {
- ipc.oif = inet->uc_index;
- } else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
+ ipc.oif = uc_index;
+ } else if (ipv4_is_lbcast(daddr) && uc_index) {
/* oif is set, packet is to local broadcast and
* uc_index is set. oif is most likely set
* by sk_bound_dev_if. If uc_index != oif check if the
* oif is an L3 master and uc_index is an L3 slave.
* If so, we want to allow the send using the uc_index.
*/
- if (ipc.oif != inet->uc_index &&
+ if (ipc.oif != uc_index &&
ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
- inet->uc_index)) {
- ipc.oif = inet->uc_index;
+ uc_index)) {
+ ipc.oif = uc_index;
}
}
@@ -1315,11 +1317,11 @@ void udp_splice_eof(struct socket *sock)
struct sock *sk = sock->sk;
struct udp_sock *up = udp_sk(sk);
- if (!up->pending || READ_ONCE(up->corkflag))
+ if (!up->pending || udp_test_bit(CORK, sk))
return;
lock_sock(sk);
- if (up->pending && !READ_ONCE(up->corkflag))
+ if (up->pending && !udp_test_bit(CORK, sk))
udp_push_pending_frames(sk);
release_sock(sk);
}
@@ -1868,7 +1870,7 @@ try_again:
(struct sockaddr *)sin);
}
- if (udp_sk(sk)->gro_enabled)
+ if (udp_test_bit(GRO_ENABLED, sk))
udp_cmsg_recv(msg, sk, skb);
if (inet_cmsg_flags(inet))
@@ -2081,7 +2083,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
}
nf_reset_ct(skb);
- if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
+ if (static_branch_unlikely(&udp_encap_needed_key) &&
+ READ_ONCE(up->encap_type)) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
@@ -2119,7 +2122,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
/*
* UDP-Lite specific tests, ignored on UDP sockets
*/
- if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+ if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
+ u16 pcrlen = READ_ONCE(up->pcrlen);
/*
* MIB statistics other than incrementing the error count are
@@ -2132,7 +2136,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
* delivery of packets with coverage values less than a value
* provided by the application."
*/
- if (up->pcrlen == 0) { /* full coverage was set */
+ if (pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
@@ -2143,9 +2147,9 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
* that it wants x while sender emits packets of smaller size y.
* Therefore the above ...()->partial_cov statement is essential.
*/
- if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
+ if (UDP_SKB_CB(skb)->cscov < pcrlen) {
net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
- UDP_SKB_CB(skb)->cscov, up->pcrlen);
+ UDP_SKB_CB(skb)->cscov, pcrlen);
goto drop;
}
}
@@ -2618,7 +2622,7 @@ void udp_destroy_sock(struct sock *sk)
if (encap_destroy)
encap_destroy(sk);
}
- if (up->encap_enabled)
+ if (udp_test_bit(ENCAP_ENABLED, sk))
static_branch_dec(&udp_encap_needed_key);
}
}
@@ -2658,9 +2662,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
switch (optname) {
case UDP_CORK:
if (val != 0) {
- WRITE_ONCE(up->corkflag, 1);
+ udp_set_bit(CORK, sk);
} else {
- WRITE_ONCE(up->corkflag, 0);
+ udp_clear_bit(CORK, sk);
lock_sock(sk);
push_pending_frames(sk);
release_sock(sk);
@@ -2675,17 +2679,17 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
case UDP_ENCAP_ESPINUDP_NON_IKE:
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
- up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
+ WRITE_ONCE(up->encap_rcv,
+ ipv6_stub->xfrm6_udp_encap_rcv);
else
#endif
- up->encap_rcv = xfrm4_udp_encap_rcv;
+ WRITE_ONCE(up->encap_rcv,
+ xfrm4_udp_encap_rcv);
#endif
fallthrough;
case UDP_ENCAP_L2TPINUDP:
- up->encap_type = val;
- lock_sock(sk);
- udp_tunnel_encap_enable(sk->sk_socket);
- release_sock(sk);
+ WRITE_ONCE(up->encap_type, val);
+ udp_tunnel_encap_enable(sk);
break;
default:
err = -ENOPROTOOPT;
@@ -2694,11 +2698,11 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
break;
case UDP_NO_CHECK6_TX:
- up->no_check6_tx = valbool;
+ udp_set_no_check6_tx(sk, valbool);
break;
case UDP_NO_CHECK6_RX:
- up->no_check6_rx = valbool;
+ udp_set_no_check6_rx(sk, valbool);
break;
case UDP_SEGMENT:
@@ -2708,14 +2712,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
break;
case UDP_GRO:
- lock_sock(sk);
/* when enabling GRO, accept the related GSO packet type */
if (valbool)
- udp_tunnel_encap_enable(sk->sk_socket);
- up->gro_enabled = valbool;
- up->accept_udp_l4 = valbool;
- release_sock(sk);
+ udp_tunnel_encap_enable(sk);
+ udp_assign_bit(GRO_ENABLED, sk, valbool);
+ udp_assign_bit(ACCEPT_L4, sk, valbool);
break;
/*
@@ -2730,8 +2732,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
val = 8;
else if (val > USHRT_MAX)
val = USHRT_MAX;
- up->pcslen = val;
- up->pcflag |= UDPLITE_SEND_CC;
+ WRITE_ONCE(up->pcslen, val);
+ udp_set_bit(UDPLITE_SEND_CC, sk);
break;
/* The receiver specifies a minimum checksum coverage value. To make
@@ -2744,8 +2746,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
val = 8;
else if (val > USHRT_MAX)
val = USHRT_MAX;
- up->pcrlen = val;
- up->pcflag |= UDPLITE_RECV_CC;
+ WRITE_ONCE(up->pcrlen, val);
+ udp_set_bit(UDPLITE_RECV_CC, sk);
break;
default:
@@ -2783,19 +2785,19 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
switch (optname) {
case UDP_CORK:
- val = READ_ONCE(up->corkflag);
+ val = udp_test_bit(CORK, sk);
break;
case UDP_ENCAP:
- val = up->encap_type;
+ val = READ_ONCE(up->encap_type);
break;
case UDP_NO_CHECK6_TX:
- val = up->no_check6_tx;
+ val = udp_get_no_check6_tx(sk);
break;
case UDP_NO_CHECK6_RX:
- val = up->no_check6_rx;
+ val = udp_get_no_check6_rx(sk);
break;
case UDP_SEGMENT:
@@ -2803,17 +2805,17 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
break;
case UDP_GRO:
- val = up->gro_enabled;
+ val = udp_test_bit(GRO_ENABLED, sk);
break;
/* The following two cannot be changed on UDP sockets, the return is
* always 0 (which corresponds to the full checksum coverage of UDP). */
case UDPLITE_SEND_CSCOV:
- val = up->pcslen;
+ val = READ_ONCE(up->pcslen);
break;
case UDPLITE_RECV_CSCOV:
- val = up->pcrlen;
+ val = READ_ONCE(up->pcrlen);
break;
default:
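
udp_lib_setsockopt()/getsockopt() now keep the boolean options (CORK, GRO, the no-check6 bits, the UDP-Lite coverage flags) in atomic sk-level bit helpers and READ_ONCE/WRITE_ONCE fields, so most cases no longer take the socket lock. The userspace contract is unchanged; a sketch exercising two of them (constants from the uapi <linux/udp.h>, availability depending on installed kernel headers):

    #include <netinet/in.h>
    #include <linux/udp.h>          /* UDP_CORK, UDP_GRO */
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int on = 1;

            if (fd < 0)
                    return 1;
            /* Both now hit udp_set_bit()/udp_assign_bit() in the kernel. */
            setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
            setsockopt(fd, IPPROTO_UDP, UDP_GRO, &on, sizeof(on));
            close(fd);
            return 0;
    }
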
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0f46b3c2e4ac..6c95d28d0c4a 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -557,10 +557,10 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
NAPI_GRO_CB(skb)->is_flist = 0;
if (!sk || !udp_sk(sk)->gro_receive) {
if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
- NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
+ NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;
if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
- (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
+ (sk && udp_test_bit(GRO_ENABLED, sk)) || NAPI_GRO_CB(skb)->is_flist)
return call_gro_receive(udp_gro_receive_segment, head, skb);
/* no GRO, be sure flush the current packet */
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index 9b18f371af0d..1e7e4aecdc48 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -78,7 +78,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
udp_sk(sk)->gro_receive = cfg->gro_receive;
udp_sk(sk)->gro_complete = cfg->gro_complete;
- udp_tunnel_encap_enable(sock);
+ udp_tunnel_encap_enable(sk);
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
index 029219749785..b6d2d16189c0 100644
--- a/net/ipv4/udp_tunnel_nic.c
+++ b/net/ipv4/udp_tunnel_nic.c
@@ -47,7 +47,7 @@ struct udp_tunnel_nic {
unsigned int n_tables;
unsigned long missed;
- struct udp_tunnel_nic_table_entry **entries;
+ struct udp_tunnel_nic_table_entry *entries[] __counted_by(n_tables);
};
/* We ensure all work structs are done using driver state, but not the code.
@@ -725,16 +725,12 @@ udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
struct udp_tunnel_nic *utn;
unsigned int i;
- utn = kzalloc(sizeof(*utn), GFP_KERNEL);
+ utn = kzalloc(struct_size(utn, entries, n_tables), GFP_KERNEL);
if (!utn)
return NULL;
utn->n_tables = n_tables;
INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);
- utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
- if (!utn->entries)
- goto err_free_utn;
-
for (i = 0; i < n_tables; i++) {
utn->entries[i] = kcalloc(info->tables[i].n_entries,
sizeof(*utn->entries[i]), GFP_KERNEL);
@@ -747,8 +743,6 @@ udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
err_free_prev_entries:
while (i--)
kfree(utn->entries[i]);
- kfree(utn->entries);
-err_free_utn:
kfree(utn);
return NULL;
}
@@ -759,7 +753,6 @@ static void udp_tunnel_nic_free(struct udp_tunnel_nic *utn)
for (i = 0; i < utn->n_tables; i++)
kfree(utn->entries[i]);
- kfree(utn->entries);
kfree(utn);
}
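
udp_tunnel_nic now embeds the per-table entries array as a flexible array member annotated with __counted_by(n_tables) and allocated via struct_size(), collapsing two allocations (and two frees) into one while giving the compiler a bounds hint. A plain C userspace sketch of the same transformation, without the kernel helpers:

    #include <stdlib.h>

    struct table;

    struct tunnel_nic {
            unsigned int n_tables;
            struct table *entries[];   /* flexible array member, one allocation */
    };

    static struct tunnel_nic *tunnel_nic_alloc(unsigned int n_tables)
    {
            /* kernel: kzalloc(struct_size(utn, entries, n_tables), GFP_KERNEL) */
            struct tunnel_nic *utn =
                    calloc(1, sizeof(*utn) + n_tables * sizeof(utn->entries[0]));

            if (utn)
                    utn->n_tables = n_tables;
            return utn;     /* freeing utn releases the array too */
    }

    int main(void)
    {
            struct tunnel_nic *utn = tunnel_nic_alloc(4);

            if (!utn)
                    return 1;
            utn->entries[3] = NULL;    /* in bounds: n_tables == 4 */
            free(utn);
            return 0;
    }
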
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 39ecdad1b50c..af37af3ab727 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -21,7 +21,6 @@ EXPORT_SYMBOL(udplite_table);
static int udplite_sk_init(struct sock *sk)
{
udp_init_sock(sk);
- udp_sk(sk)->pcflag = UDPLITE_BIT;
pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, "
"please contact the netdev mailing list\n");
return 0;
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index eac206a290d0..183f6dc37242 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -85,11 +85,11 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
struct udphdr *uh;
struct iphdr *iph;
int iphlen, len;
-
__u8 *udpdata;
__be32 *udpdata32;
- __u16 encap_type = up->encap_type;
+ u16 encap_type;
+ encap_type = READ_ONCE(up->encap_type);
/* if this is not encapsulated socket, then just return now */
if (!encap_type)
return 1;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 0b6ee962c84e..c2d471ad7922 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -236,6 +236,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.ioam6_id = IOAM6_DEFAULT_IF_ID,
.ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
.ndisc_evict_nocarrier = 1,
+ .ra_honor_pio_life = 0,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -297,6 +298,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.ioam6_id = IOAM6_DEFAULT_IF_ID,
.ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
.ndisc_evict_nocarrier = 1,
+ .ra_honor_pio_life = 0,
};
/* Check if link is ready: is it up and is a valid qdisc available */
@@ -2657,22 +2659,23 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
else
stored_lft = 0;
- if (!create && stored_lft) {
+
+ /* RFC4862 Section 5.5.3e:
+ * "Note that the preferred lifetime of the
+ * corresponding address is always reset to
+ * the Preferred Lifetime in the received
+ * Prefix Information option, regardless of
+ * whether the valid lifetime is also reset or
+ * ignored."
+ *
+ * So we should always update prefered_lft here.
+ */
+ update_lft = !create && stored_lft;
+
+ if (update_lft && !in6_dev->cnf.ra_honor_pio_life) {
const u32 minimum_lft = min_t(u32,
stored_lft, MIN_VALID_LIFETIME);
valid_lft = max(valid_lft, minimum_lft);
-
- /* RFC4862 Section 5.5.3e:
- * "Note that the preferred lifetime of the
- * corresponding address is always reset to
- * the Preferred Lifetime in the received
- * Prefix Information option, regardless of
- * whether the valid lifetime is also reset or
- * ignored."
- *
- * So we should always update prefered_lft here.
- */
- update_lft = 1;
}
if (update_lft) {
@@ -6846,6 +6849,15 @@ static const struct ctl_table addrconf_sysctl[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "ra_honor_pio_life",
+ .data = &ipv6_devconf.ra_honor_pio_life,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
#ifdef CONFIG_IPV6_ROUTER_PREF
{
.procname = "accept_ra_rtr_pref",
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 368824fe9719..c6ad0d6e99b5 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -217,10 +217,11 @@ lookup_protocol:
inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk);
np->hop_limit = -1;
np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
- np->mc_loop = 1;
- np->mc_all = 1;
+ inet6_set_bit(MC6_LOOP, sk);
+ inet6_set_bit(MC6_ALL, sk);
np->pmtudisc = IPV6_PMTUDISC_WANT;
- np->repflow = net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_ESTABLISHED;
+ inet6_assign_bit(REPFLOW, sk, net->ipv6.sysctl.flowlabel_reflect &
+ FLOWLABEL_REFLECT_ESTABLISHED);
sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
@@ -536,7 +537,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
}
sin->sin6_port = inet->inet_dport;
sin->sin6_addr = sk->sk_v6_daddr;
- if (np->sndflow)
+ if (inet6_test_bit(SNDFLOW, sk))
sin->sin6_flowinfo = np->flow_label;
BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
CGROUP_INET6_GETPEERNAME);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 41ebc4e57473..cc6a502db39d 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -80,7 +80,8 @@ int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
struct flowi6 fl6;
int err = 0;
- if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+ if (inet6_test_bit(SNDFLOW, sk) &&
+ (np->flow_label & IPV6_FLOWLABEL_MASK)) {
flowlabel = fl6_sock_lookup(sk, np->flow_label);
if (IS_ERR(flowlabel))
return -EINVAL;
@@ -163,7 +164,7 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
if (usin->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
- if (np->sndflow)
+ if (inet6_test_bit(SNDFLOW, sk))
fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
if (ipv6_addr_any(&usin->sin6_addr)) {
@@ -305,11 +306,10 @@ static void ipv6_icmp_error_rfc4884(const struct sk_buff *skb,
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
struct icmp6hdr *icmph = icmp6_hdr(skb);
struct sock_exterr_skb *serr;
- if (!np->recverr)
+ if (!inet6_test_bit(RECVERR6, sk))
return;
skb = skb_clone(skb, GFP_ATOMIC);
@@ -332,7 +332,7 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__skb_pull(skb, payload - skb->data);
- if (inet6_sk(sk)->recverr_rfc4884)
+ if (inet6_test_bit(RECVERR6_RFC4884, sk))
ipv6_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);
skb_reset_transport_header(skb);
@@ -344,12 +344,11 @@ EXPORT_SYMBOL_GPL(ipv6_icmp_error);
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
{
- const struct ipv6_pinfo *np = inet6_sk(sk);
struct sock_exterr_skb *serr;
struct ipv6hdr *iph;
struct sk_buff *skb;
- if (!np->recverr)
+ if (!inet6_test_bit(RECVERR6, sk))
return;
skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
@@ -493,7 +492,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
const struct ipv6hdr *ip6h = container_of((struct in6_addr *)(nh + serr->addr_offset),
struct ipv6hdr, daddr);
sin->sin6_addr = ip6h->daddr;
- if (np->sndflow)
+ if (inet6_test_bit(SNDFLOW, sk))
sin->sin6_flowinfo = ip6_flowinfo(ip6h);
sin->sin6_scope_id =
ipv6_iface_scope_id(&sin->sin6_addr,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 93a594a901d1..8fb4a791881a 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -588,7 +588,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
- ipcm6_init_sk(&ipc6, np);
+ ipcm6_init_sk(&ipc6, sk);
ipc6.sockc.mark = mark;
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
@@ -791,7 +791,7 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
msg.offset = 0;
msg.type = type;
- ipcm6_init_sk(&ipc6, np);
+ ipcm6_init_sk(&ipc6, sk);
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
ipc6.sockc.mark = mark;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 0c50dcd35fe8..80043e46117c 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -133,7 +133,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
fl6.daddr = sk->sk_v6_daddr;
res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
- np->tclass, sk->sk_priority);
+ np->tclass, READ_ONCE(sk->sk_priority));
rcu_read_unlock();
return res;
}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index b3ca4beb4405..eca07e10e21f 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -513,7 +513,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
return 0;
}
- if (np->repflow) {
+ if (inet6_test_bit(REPFLOW, sk)) {
freq->flr_label = np->flow_label;
return 0;
}
@@ -551,10 +551,10 @@ static int ipv6_flowlabel_put(struct sock *sk, struct in6_flowlabel_req *freq)
if (freq->flr_flags & IPV6_FL_F_REFLECT) {
if (sk->sk_protocol != IPPROTO_TCP)
return -ENOPROTOOPT;
- if (!np->repflow)
+ if (!inet6_test_bit(REPFLOW, sk))
return -ESRCH;
np->flow_label = 0;
- np->repflow = 0;
+ inet6_clear_bit(REPFLOW, sk);
return 0;
}
@@ -626,7 +626,7 @@ static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq,
if (sk->sk_protocol != IPPROTO_TCP)
return -ENOPROTOOPT;
- np->repflow = 1;
+ inet6_set_bit(REPFLOW, sk);
return 0;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 54fc4c711f2c..cdaa9275e990 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -232,12 +232,11 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(ip6_output);
-bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+bool ip6_autoflowlabel(struct net *net, const struct sock *sk)
{
- if (!np->autoflowlabel_set)
+ if (!inet6_test_bit(AUTOFLOWLABEL_SET, sk))
return ip6_default_np_autolabel(net);
- else
- return np->autoflowlabel;
+ return inet6_test_bit(AUTOFLOWLABEL, sk);
}
/*
@@ -309,12 +308,12 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
* Fill in the IPv6 header
*/
if (np)
- hlimit = np->hop_limit;
+ hlimit = READ_ONCE(np->hop_limit);
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
- ip6_autoflowlabel(net, np), fl6));
+ ip6_autoflowlabel(net, sk), fl6));
hdr->payload_len = htons(seg_len);
hdr->nexthdr = proto;
@@ -369,9 +368,8 @@ static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
if (sk && ra->sel == sel &&
(!sk->sk_bound_dev_if ||
sk->sk_bound_dev_if == skb->dev->ifindex)) {
- struct ipv6_pinfo *np = inet6_sk(sk);
- if (np && np->rtalert_isolate &&
+ if (inet6_test_bit(RTALERT_ISOLATE, sk) &&
!net_eq(sock_net(sk), dev_net(skb->dev))) {
continue;
}
@@ -881,9 +879,11 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
mtu = IPV6_MIN_MTU;
}
- if (np && np->frag_size < mtu) {
- if (np->frag_size)
- mtu = np->frag_size;
+ if (np) {
+ u32 frag_size = READ_ONCE(np->frag_size);
+
+ if (frag_size && frag_size < mtu)
+ mtu = frag_size;
}
if (mtu < hlen + sizeof(struct frag_hdr) + 8)
goto fail_toobig;
@@ -1113,7 +1113,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
rcu_read_lock();
from = rt ? rcu_dereference(rt->from) : NULL;
err = ip6_route_get_saddr(net, from, &fl6->daddr,
- sk ? inet6_sk(sk)->srcprefs : 0,
+ sk ? READ_ONCE(inet6_sk(sk)->srcprefs) : 0,
&fl6->saddr);
rcu_read_unlock();
@@ -1392,7 +1392,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
struct rt6_info *rt)
{
struct ipv6_pinfo *np = inet6_sk(sk);
- unsigned int mtu;
+ unsigned int mtu, frag_size;
struct ipv6_txoptions *nopt, *opt = ipc6->opt;
/* callers pass dst together with a reference, set it first so
@@ -1436,15 +1436,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
v6_cork->hop_limit = ipc6->hlimit;
v6_cork->tclass = ipc6->tclass;
if (rt->dst.flags & DST_XFRM_TUNNEL)
- mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+ mtu = READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE ?
READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
else
- mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+ mtu = READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE ?
READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));
- if (np->frag_size < mtu) {
- if (np->frag_size)
- mtu = np->frag_size;
- }
+
+ frag_size = READ_ONCE(np->frag_size);
+ if (frag_size && frag_size < mtu)
+ mtu = frag_size;
+
cork->base.fragsize = mtu;
cork->base.gso_size = ipc6->gso_size;
cork->base.tx_flags = 0;
@@ -1935,7 +1936,6 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
struct sk_buff *skb, *tmp_skb;
struct sk_buff **tail_skb;
struct in6_addr *final_dst;
- struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
struct ipv6hdr *hdr;
struct ipv6_txoptions *opt = v6_cork->opt;
@@ -1978,13 +1978,13 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
ip6_flow_hdr(hdr, v6_cork->tclass,
ip6_make_flowlabel(net, skb, fl6->flowlabel,
- ip6_autoflowlabel(net, np), fl6));
+ ip6_autoflowlabel(net, sk), fl6));
hdr->hop_limit = v6_cork->hop_limit;
hdr->nexthdr = proto;
hdr->saddr = fl6->saddr;
hdr->daddr = *final_dst;
- skb->priority = sk->sk_priority;
+ skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = cork->base.mark;
skb->tstamp = cork->base.transmit_time;
@@ -2091,7 +2091,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
return ERR_PTR(err);
}
if (ipc6->dontfrag < 0)
- ipc6->dontfrag = inet6_sk(sk)->dontfrag;
+ ipc6->dontfrag = inet6_test_bit(DONTFRAG, sk);
err = __ip6_append_data(sk, &queue, cork, &v6_cork,
&current->task_frag, getfrag, from,
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index cdc4d4ee2420..70d38705c92f 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -75,8 +75,9 @@ EXPORT_SYMBOL_GPL(udp_sock_create6);
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb,
- struct net_device *dev, struct in6_addr *saddr,
- struct in6_addr *daddr,
+ struct net_device *dev,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be32 label,
__be16 src_port, __be16 dst_port, bool nocheck)
{
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0e2a0847b387..7d661735cb9d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -415,6 +415,101 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (ip6_mroute_opt(optname))
return ip6_mroute_setsockopt(sk, optname, optval, optlen);
+ /* Handle options that can be set without locking the socket. */
+ switch (optname) {
+ case IPV6_UNICAST_HOPS:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (val > 255 || val < -1)
+ return -EINVAL;
+ WRITE_ONCE(np->hop_limit, val);
+ return 0;
+ case IPV6_MULTICAST_LOOP:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (val != valbool)
+ return -EINVAL;
+ inet6_assign_bit(MC6_LOOP, sk, valbool);
+ return 0;
+ case IPV6_MULTICAST_HOPS:
+ if (sk->sk_type == SOCK_STREAM)
+ return retv;
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (val > 255 || val < -1)
+ return -EINVAL;
+ WRITE_ONCE(np->mcast_hops,
+ val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
+ return 0;
+ case IPV6_MTU:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (val && val < IPV6_MIN_MTU)
+ return -EINVAL;
+ WRITE_ONCE(np->frag_size, val);
+ return 0;
+ case IPV6_MINHOPCOUNT:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ if (val)
+ static_branch_enable(&ip6_min_hopcount);
+
+ /* tcp_v6_err() and tcp_v6_rcv() might read min_hopcount
+ * while we are changing it.
+ */
+ WRITE_ONCE(np->min_hopcount, val);
+ return 0;
+ case IPV6_RECVERR_RFC4884:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (val < 0 || val > 1)
+ return -EINVAL;
+ inet6_assign_bit(RECVERR6_RFC4884, sk, valbool);
+ return 0;
+ case IPV6_MULTICAST_ALL:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ inet6_assign_bit(MC6_ALL, sk, valbool);
+ return 0;
+ case IPV6_AUTOFLOWLABEL:
+ inet6_assign_bit(AUTOFLOWLABEL, sk, valbool);
+ inet6_set_bit(AUTOFLOWLABEL_SET, sk);
+ return 0;
+ case IPV6_DONTFRAG:
+ inet6_assign_bit(DONTFRAG, sk, valbool);
+ return 0;
+ case IPV6_RECVERR:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ inet6_assign_bit(RECVERR6, sk, valbool);
+ if (!val)
+ skb_errqueue_purge(&sk->sk_error_queue);
+ return 0;
+ case IPV6_ROUTER_ALERT_ISOLATE:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ inet6_assign_bit(RTALERT_ISOLATE, sk, valbool);
+ return 0;
+ case IPV6_MTU_DISCOVER:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_OMIT)
+ return -EINVAL;
+ WRITE_ONCE(np->pmtudisc, val);
+ return 0;
+ case IPV6_FLOWINFO_SEND:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ inet6_assign_bit(SNDFLOW, sk, valbool);
+ return 0;
+ case IPV6_ADDR_PREFERENCES:
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ return ip6_sock_set_addr_preferences(sk, val);
+ }
if (needs_rtnl)
rtnl_lock();
sockopt_lock_sock(sk);
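
All options moved into this early switch are now set without sockopt_lock_sock()/RTNL, using WRITE_ONCE() or the inet6 socket-bit helpers, with the corresponding locked cases deleted further down. Userspace is unaffected; a sketch hitting two of the now-lockless options (standard names from <netinet/in.h>, error handling trimmed):

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);
            int hops = 32;                  /* IPV6_UNICAST_HOPS: -1 or 0..255 */
            int pmtu = IPV6_PMTUDISC_DO;    /* IPV6_MTU_DISCOVER */

            if (fd < 0)
                    return 1;
            /* Both options now take the lockless path added above. */
            setsockopt(fd, IPPROTO_IPV6, IPV6_UNICAST_HOPS, &hops, sizeof(hops));
            setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER, &pmtu, sizeof(pmtu));
            close(fd);
            return 0;
    }
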
@@ -733,34 +828,7 @@ done:
}
break;
}
- case IPV6_UNICAST_HOPS:
- if (optlen < sizeof(int))
- goto e_inval;
- if (val > 255 || val < -1)
- goto e_inval;
- np->hop_limit = val;
- retv = 0;
- break;
- case IPV6_MULTICAST_HOPS:
- if (sk->sk_type == SOCK_STREAM)
- break;
- if (optlen < sizeof(int))
- goto e_inval;
- if (val > 255 || val < -1)
- goto e_inval;
- np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
- retv = 0;
- break;
-
- case IPV6_MULTICAST_LOOP:
- if (optlen < sizeof(int))
- goto e_inval;
- if (val != valbool)
- goto e_inval;
- np->mc_loop = valbool;
- retv = 0;
- break;
case IPV6_UNICAST_IF:
{
@@ -862,13 +930,6 @@ done:
retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
break;
}
- case IPV6_MULTICAST_ALL:
- if (optlen < sizeof(int))
- goto e_inval;
- np->mc_all = valbool;
- retv = 0;
- break;
-
case MCAST_JOIN_GROUP:
case MCAST_LEAVE_GROUP:
if (in_compat_syscall())
@@ -896,42 +957,6 @@ done:
goto e_inval;
retv = ip6_ra_control(sk, val);
break;
- case IPV6_ROUTER_ALERT_ISOLATE:
- if (optlen < sizeof(int))
- goto e_inval;
- np->rtalert_isolate = valbool;
- retv = 0;
- break;
- case IPV6_MTU_DISCOVER:
- if (optlen < sizeof(int))
- goto e_inval;
- if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_OMIT)
- goto e_inval;
- np->pmtudisc = val;
- retv = 0;
- break;
- case IPV6_MTU:
- if (optlen < sizeof(int))
- goto e_inval;
- if (val && val < IPV6_MIN_MTU)
- goto e_inval;
- np->frag_size = val;
- retv = 0;
- break;
- case IPV6_RECVERR:
- if (optlen < sizeof(int))
- goto e_inval;
- np->recverr = valbool;
- if (!val)
- skb_errqueue_purge(&sk->sk_error_queue);
- retv = 0;
- break;
- case IPV6_FLOWINFO_SEND:
- if (optlen < sizeof(int))
- goto e_inval;
- np->sndflow = valbool;
- retv = 0;
- break;
case IPV6_FLOWLABEL_MGR:
retv = ipv6_flowlabel_opt(sk, optval, optlen);
break;
@@ -943,47 +968,10 @@ done:
retv = xfrm_user_policy(sk, optname, optval, optlen);
break;
- case IPV6_ADDR_PREFERENCES:
- if (optlen < sizeof(int))
- goto e_inval;
- retv = __ip6_sock_set_addr_preferences(sk, val);
- break;
- case IPV6_MINHOPCOUNT:
- if (optlen < sizeof(int))
- goto e_inval;
- if (val < 0 || val > 255)
- goto e_inval;
-
- if (val)
- static_branch_enable(&ip6_min_hopcount);
-
- /* tcp_v6_err() and tcp_v6_rcv() might read min_hopcount
- * while we are changing it.
- */
- WRITE_ONCE(np->min_hopcount, val);
- retv = 0;
- break;
- case IPV6_DONTFRAG:
- np->dontfrag = valbool;
- retv = 0;
- break;
- case IPV6_AUTOFLOWLABEL:
- np->autoflowlabel = valbool;
- np->autoflowlabel_set = 1;
- retv = 0;
- break;
case IPV6_RECVFRAGSIZE:
np->rxopt.bits.recvfragsize = valbool;
retv = 0;
break;
- case IPV6_RECVERR_RFC4884:
- if (optlen < sizeof(int))
- goto e_inval;
- if (val < 0 || val > 1)
- goto e_inval;
- np->recverr_rfc4884 = valbool;
- retv = 0;
- break;
}
unlock:
@@ -1180,7 +1168,8 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxhlim) {
- int hlim = np->mcast_hops;
+ int hlim = READ_ONCE(np->mcast_hops);
+
put_cmsg(&msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim);
}
if (np->rxopt.bits.rxtclass) {
@@ -1197,7 +1186,8 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxohlim) {
- int hlim = np->mcast_hops;
+ int hlim = READ_ONCE(np->mcast_hops);
+
put_cmsg(&msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
}
if (np->rxopt.bits.rxflow) {
@@ -1347,9 +1337,9 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
struct dst_entry *dst;
if (optname == IPV6_UNICAST_HOPS)
- val = np->hop_limit;
+ val = READ_ONCE(np->hop_limit);
else
- val = np->mcast_hops;
+ val = READ_ONCE(np->mcast_hops);
if (val < 0) {
rcu_read_lock();
@@ -1365,7 +1355,7 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
}
case IPV6_MULTICAST_LOOP:
- val = np->mc_loop;
+ val = inet6_test_bit(MC6_LOOP, sk);
break;
case IPV6_MULTICAST_IF:
@@ -1373,7 +1363,7 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_MULTICAST_ALL:
- val = np->mc_all;
+ val = inet6_test_bit(MC6_ALL, sk);
break;
case IPV6_UNICAST_IF:
@@ -1381,15 +1371,15 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_MTU_DISCOVER:
- val = np->pmtudisc;
+ val = READ_ONCE(np->pmtudisc);
break;
case IPV6_RECVERR:
- val = np->recverr;
+ val = inet6_test_bit(RECVERR6, sk);
break;
case IPV6_FLOWINFO_SEND:
- val = np->sndflow;
+ val = inet6_test_bit(SNDFLOW, sk);
break;
case IPV6_FLOWLABEL_MGR:
@@ -1424,33 +1414,35 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
}
case IPV6_ADDR_PREFERENCES:
+ {
+ u8 srcprefs = READ_ONCE(np->srcprefs);
val = 0;
- if (np->srcprefs & IPV6_PREFER_SRC_TMP)
+ if (srcprefs & IPV6_PREFER_SRC_TMP)
val |= IPV6_PREFER_SRC_TMP;
- else if (np->srcprefs & IPV6_PREFER_SRC_PUBLIC)
+ else if (srcprefs & IPV6_PREFER_SRC_PUBLIC)
val |= IPV6_PREFER_SRC_PUBLIC;
else {
/* XXX: should we return system default? */
val |= IPV6_PREFER_SRC_PUBTMP_DEFAULT;
}
- if (np->srcprefs & IPV6_PREFER_SRC_COA)
+ if (srcprefs & IPV6_PREFER_SRC_COA)
val |= IPV6_PREFER_SRC_COA;
else
val |= IPV6_PREFER_SRC_HOME;
break;
-
+ }
case IPV6_MINHOPCOUNT:
- val = np->min_hopcount;
+ val = READ_ONCE(np->min_hopcount);
break;
case IPV6_DONTFRAG:
- val = np->dontfrag;
+ val = inet6_test_bit(DONTFRAG, sk);
break;
case IPV6_AUTOFLOWLABEL:
- val = ip6_autoflowlabel(sock_net(sk), np);
+ val = ip6_autoflowlabel(sock_net(sk), sk);
break;
case IPV6_RECVFRAGSIZE:
@@ -1458,11 +1450,11 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_ROUTER_ALERT_ISOLATE:
- val = np->rtalert_isolate;
+ val = inet6_test_bit(RTALERT_ISOLATE, sk);
break;
case IPV6_RECVERR_RFC4884:
- val = np->recverr_rfc4884;
+ val = inet6_test_bit(RECVERR6_RFC4884, sk);
break;
default:
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 5ce25bcb9974..99e28b444a4c 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -642,7 +642,7 @@ bool inet6_mc_check(const struct sock *sk, const struct in6_addr *mc_addr,
}
if (!mc) {
rcu_read_unlock();
- return np->mc_all;
+ return inet6_test_bit(MC6_ALL, sk);
}
psl = rcu_dereference(mc->sflist);
if (!psl) {
@@ -1716,7 +1716,7 @@ static void ip6_mc_hdr(const struct sock *sk, struct sk_buff *skb,
hdr->payload_len = htons(len);
hdr->nexthdr = proto;
- hdr->hop_limit = inet6_sk(sk)->hop_limit;
+ hdr->hop_limit = READ_ONCE(inet6_sk(sk)->hop_limit);
hdr->saddr = *saddr;
hdr->daddr = *daddr;
@@ -3011,8 +3011,6 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s
continue;
state->im = rcu_dereference(state->idev->mc_list);
}
- if (!state->im)
- break;
psf = rcu_dereference(state->im->mca_sources);
}
out:
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 553c8664e0a7..679443d7ecb5 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -500,7 +500,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
csum_partial(icmp6h,
skb->len, 0));
- ip6_nd_hdr(skb, saddr, daddr, inet6_sk(sk)->hop_limit, skb->len);
+ ip6_nd_hdr(skb, saddr, daddr, READ_ONCE(inet6_sk(sk)->hop_limit), skb->len);
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
@@ -1996,7 +1996,7 @@ static int __net_init ndisc_net_init(struct net *net)
np = inet6_sk(sk);
np->hop_limit = 255;
/* Do not loopback ndisc messages */
- np->mc_loop = 0;
+ inet6_clear_bit(MC6_LOOP, sk);
return 0;
}
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 5831aaa53d75..e8fb0d275cc2 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -89,7 +89,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
return -EAFNOSUPPORT;
}
daddr = &(u->sin6_addr);
- if (np->sndflow)
+ if (inet6_test_bit(SNDFLOW, sk))
fl6.flowlabel = u->sin6_flowinfo & IPV6_FLOWINFO_MASK;
if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
oif = u->sin6_scope_id;
@@ -118,7 +118,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
l3mdev_master_ifindex_by_index(sock_net(sk), oif) != sk->sk_bound_dev_if))
return -EINVAL;
- ipcm6_init_sk(&ipc6, np);
+ ipcm6_init_sk(&ipc6, sk);
ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 42fcec3ecf5e..a2aa54a2baae 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -291,6 +291,7 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
+ bool recverr = inet6_test_bit(RECVERR6, sk);
struct ipv6_pinfo *np = inet6_sk(sk);
int err;
int harderr;
@@ -300,26 +301,26 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
2. Socket is connected (otherwise the error indication
is useless without recverr and error is hard.
*/
- if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
+ if (!recverr && sk->sk_state != TCP_ESTABLISHED)
return;
harderr = icmpv6_err_convert(type, code, &err);
if (type == ICMPV6_PKT_TOOBIG) {
ip6_sk_update_pmtu(skb, sk, info);
- harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
+ harderr = (READ_ONCE(np->pmtudisc) == IPV6_PMTUDISC_DO);
}
if (type == NDISC_REDIRECT) {
ip6_sk_redirect(skb, sk);
return;
}
- if (np->recverr) {
+ if (recverr) {
u8 *payload = skb->data;
if (!inet_test_bit(HDRINCL, sk))
payload += offset;
ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
}
- if (np->recverr || harderr) {
+ if (recverr || harderr) {
sk->sk_err = err;
sk_error_report(sk);
}
@@ -587,7 +588,6 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
struct flowi6 *fl6, struct dst_entry **dstp,
unsigned int flags, const struct sockcm_cookie *sockc)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
struct ipv6hdr *iph;
struct sk_buff *skb;
@@ -668,7 +668,7 @@ out:
error:
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
error_check:
- if (err == -ENOBUFS && !np->recverr)
+ if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk))
err = 0;
return err;
}
@@ -795,7 +795,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
return -EINVAL;
daddr = &sin6->sin6_addr;
- if (np->sndflow) {
+ if (inet6_test_bit(SNDFLOW, sk)) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
@@ -898,7 +898,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
if (ipc6.dontfrag < 0)
- ipc6.dontfrag = np->dontfrag;
+ ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9c687b357e6a..b132feae3393 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -341,7 +341,7 @@ struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
int flags)
{
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
- 1, DST_OBSOLETE_FORCE_CHK, flags);
+ DST_OBSOLETE_FORCE_CHK, flags);
if (rt) {
rt6_info_init(rt);
@@ -2622,7 +2622,7 @@ static struct dst_entry *ip6_route_output_flags_noref(struct net *net,
if (!any_src)
flags |= RT6_LOOKUP_F_HAS_SADDR;
else if (sk)
- flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
+ flags |= rt6_srcprefs2flags(READ_ONCE(inet6_sk(sk)->srcprefs));
return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
@@ -2655,7 +2655,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
struct net_device *loopback_dev = net->loopback_dev;
struct dst_entry *new = NULL;
- rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
+ rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev,
DST_OBSOLETE_DEAD, 0);
if (rt) {
rt6_info_init(rt);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 44b6949d72b2..bfe7d19ff4fd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -163,7 +163,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
memset(&fl6, 0, sizeof(fl6));
- if (np->sndflow) {
+ if (inet6_test_bit(SNDFLOW, sk)) {
fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
IP6_ECN_flow_init(fl6.flowlabel);
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
@@ -508,7 +508,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
tcp_ld_RTO_revert(sk, seq);
}
- if (!sock_owned_by_user(sk) && np->recverr) {
+ if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
WRITE_ONCE(sk->sk_err, err);
sk_error_report(sk);
} else {
@@ -548,7 +548,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
&ireq->ir_v6_rmt_addr);
fl6->daddr = ireq->ir_v6_rmt_addr;
- if (np->repflow && ireq->pktopts)
+ if (inet6_test_bit(REPFLOW, sk) && ireq->pktopts)
fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
@@ -565,7 +565,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
if (!opt)
opt = rcu_dereference(np->opt);
err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark),
- opt, tclass, sk->sk_priority);
+ opt, tclass, READ_ONCE(sk->sk_priority));
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -797,7 +797,7 @@ static void tcp_v6_init_req(struct request_sock *req,
(ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo ||
np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
- np->rxopt.bits.rxohlim || np->repflow)) {
+ np->rxopt.bits.rxohlim || inet6_test_bit(REPFLOW, sk_listener))) {
refcount_inc(&skb->users);
ireq->pktopts = skb;
}
@@ -1055,12 +1055,10 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
if (sk) {
oif = sk->sk_bound_dev_if;
if (sk_fullsock(sk)) {
- const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
-
trace_tcp_send_reset(sk, skb);
- if (np->repflow)
+ if (inet6_test_bit(REPFLOW, sk))
label = ip6_flowlabel(ipv6h);
- priority = sk->sk_priority;
+ priority = READ_ONCE(sk->sk_priority);
txhash = sk->sk_txhash;
}
if (sk->sk_state == TCP_TIME_WAIT) {
@@ -1247,7 +1245,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
newnp->mcast_oif = inet_iif(skb);
newnp->mcast_hops = ip_hdr(skb)->ttl;
newnp->rcv_flowinfo = 0;
- if (np->repflow)
+ if (inet6_test_bit(REPFLOW, sk))
newnp->flow_label = 0;
/*
@@ -1320,7 +1318,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
newnp->mcast_oif = tcp_v6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
- if (np->repflow)
+ if (inet6_test_bit(REPFLOW, sk))
newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
/* Set ToS of the new socket based upon the value of incoming SYN.
@@ -1542,10 +1540,11 @@ ipv6_pktoptions:
if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
np->mcast_oif = tcp_v6_iif(opt_skb);
if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
- np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
+ WRITE_ONCE(np->mcast_hops,
+ ipv6_hdr(opt_skb)->hop_limit);
if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
- if (np->repflow)
+ if (inet6_test_bit(REPFLOW, sk))
np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
tcp_v6_restore_cb(opt_skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 86b5d509a468..5e9312eefed0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -413,7 +413,7 @@ try_again:
(struct sockaddr *)sin6);
}
- if (udp_sk(sk)->gro_enabled)
+ if (udp_test_bit(GRO_ENABLED, sk))
udp_cmsg_recv(msg, sk, skb);
if (np->rxopt.all)
@@ -571,7 +571,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
- if (!sk || udp_sk(sk)->encap_type) {
+ if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
/* No socket for error: try tunnels before discarding */
if (static_branch_unlikely(&udpv6_encap_needed_key)) {
sk = __udp6_lib_err_encap(net, hdr, offset, uh,
@@ -598,7 +598,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!ip6_sk_accept_pmtu(sk))
goto out;
ip6_sk_update_pmtu(skb, sk, info);
- if (np->pmtudisc != IPV6_PMTUDISC_DONT)
+ if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
harderr = 1;
}
if (type == NDISC_REDIRECT) {
@@ -619,7 +619,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out;
}
- if (!np->recverr) {
+ if (!inet6_test_bit(RECVERR6, sk)) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
} else {
@@ -688,7 +688,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
}
nf_reset_ct(skb);
- if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
+ if (static_branch_unlikely(&udpv6_encap_needed_key) &&
+ READ_ONCE(up->encap_type)) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
@@ -726,16 +727,17 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
- if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+ if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
+ u16 pcrlen = READ_ONCE(up->pcrlen);
- if (up->pcrlen == 0) { /* full coverage was set */
+ if (pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
- if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
+ if (UDP_SKB_CB(skb)->cscov < pcrlen) {
net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
- UDP_SKB_CB(skb)->cscov, up->pcrlen);
+ UDP_SKB_CB(skb)->cscov, pcrlen);
goto drop;
}
}
@@ -858,7 +860,7 @@ start_lookup:
/* If zero checksum and no_check is not on for
* the socket then skip it.
*/
- if (!uh->check && !udp_sk(sk)->no_check6_rx)
+ if (!uh->check && !udp_get_no_check6_rx(sk))
continue;
if (!first) {
first = sk;
@@ -980,7 +982,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
udp6_sk_rx_dst_set(sk, dst);
- if (!uh->check && !udp_sk(sk)->no_check6_rx) {
+ if (!uh->check && !udp_get_no_check6_rx(sk)) {
if (refcounted)
sock_put(sk);
goto report_csum_error;
@@ -1002,7 +1004,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
/* Unicast */
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk) {
- if (!uh->check && !udp_sk(sk)->no_check6_rx)
+ if (!uh->check && !udp_get_no_check6_rx(sk))
goto report_csum_error;
return udp6_unicast_rcv_skb(sk, skb, uh);
}
@@ -1241,7 +1243,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
kfree_skb(skb);
return -EINVAL;
}
- if (udp_sk(sk)->no_check6_tx) {
+ if (udp_get_no_check6_tx(sk)) {
kfree_skb(skb);
return -EINVAL;
}
@@ -1262,7 +1264,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
if (is_udplite)
csum = udplite_csum(skb);
- else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */
+ else if (udp_get_no_check6_tx(sk)) { /* UDP csum disabled */
skb->ip_summed = CHECKSUM_NONE;
goto send;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
@@ -1281,7 +1283,7 @@ csum_partial:
send:
err = ip6_send_skb(skb);
if (err) {
- if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
+ if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
UDP6_INC_STATS(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
@@ -1332,7 +1334,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
int addr_len = msg->msg_namelen;
bool connected = false;
int ulen = len;
- int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
+ int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
int err;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
@@ -1427,7 +1429,7 @@ do_udp_sendmsg:
fl6->fl6_dport = sin6->sin6_port;
daddr = &sin6->sin6_addr;
- if (np->sndflow) {
+ if (inet6_test_bit(SNDFLOW, sk)) {
fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
@@ -1593,7 +1595,7 @@ back_from_confirm:
do_append_data:
if (ipc6.dontfrag < 0)
- ipc6.dontfrag = np->dontfrag;
+ ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
up->len += ulen;
err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
&ipc6, fl6, (struct rt6_info *)dst,
@@ -1606,7 +1608,7 @@ do_append_data:
up->pending = 0;
if (err > 0)
- err = np->recverr ? net_xmit_errno(err) : 0;
+ err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
release_sock(sk);
out:
@@ -1644,11 +1646,11 @@ static void udpv6_splice_eof(struct socket *sock)
struct sock *sk = sock->sk;
struct udp_sock *up = udp_sk(sk);
- if (!up->pending || READ_ONCE(up->corkflag))
+ if (!up->pending || udp_test_bit(CORK, sk))
return;
lock_sock(sk);
- if (up->pending && !READ_ONCE(up->corkflag))
+ if (up->pending && !udp_test_bit(CORK, sk))
udp_v6_push_pending_frames(sk);
release_sock(sk);
}
@@ -1670,7 +1672,7 @@ void udpv6_destroy_sock(struct sock *sk)
if (encap_destroy)
encap_destroy(sk);
}
- if (up->encap_enabled) {
+ if (udp_test_bit(ENCAP_ENABLED, sk)) {
static_branch_dec(&udpv6_encap_needed_key);
udp_encap_disable();
}
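
The udp.c conversion above is one recurring pattern in this series: single-bit ipv6_pinfo and udp_sock fields become atomic socket flag bits (inet6_test_bit(), udp_test_bit()), while wider fields such as pmtudisc, pcrlen and encap_type keep their type but gain READ_ONCE()/WRITE_ONCE() so the ICMP error and receive paths can read them without the socket lock. A minimal userspace sketch of that contract, with the kernel's marked-access macros modeled as C11 relaxed atomics (all names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* stand-ins for the kernel's marked-access macros */
#define WRITE_ONCE_MODEL(p, v) atomic_store_explicit((p), (v), memory_order_relaxed)
#define READ_ONCE_MODEL(p)     atomic_load_explicit((p), memory_order_relaxed)

struct toy_sock {
	_Atomic int pmtudisc;          /* multi-bit field: marked accesses */
	_Atomic unsigned long flags;   /* flag word: one bit per boolean */
};

#define TOY_RECVERR_BIT (1UL << 0)

/* setsockopt() path: may run under the socket lock, writes are marked */
static void toy_set_pmtudisc(struct toy_sock *sk, int val)
{
	WRITE_ONCE_MODEL(&sk->pmtudisc, val);
}

/* ICMP error handler path: no socket lock, reads are marked */
static int toy_recverr_enabled(struct toy_sock *sk)
{
	return READ_ONCE_MODEL(&sk->flags) & TOY_RECVERR_BIT;
}

int main(void)
{
	struct toy_sock sk = { .pmtudisc = 0, .flags = TOY_RECVERR_BIT };

	toy_set_pmtudisc(&sk, 2);
	printf("pmtudisc=%d recverr=%d\n",
	       READ_ONCE_MODEL(&sk.pmtudisc), toy_recverr_enabled(&sk));
	return 0;
}

Under this discipline a racy setsockopt() is at worst observed late, never as a torn or compiler-refetched value, which is what lockless paths such as __udp6_lib_err() above rely on.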
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 267d491e9707..a60bec9b14f1 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -17,7 +17,6 @@
static int udplitev6_sk_init(struct sock *sk)
{
udpv6_init_sock(sk);
- udp_sk(sk)->pcflag = UDPLITE_BIT;
pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, "
"please contact the netdev mailing list\n");
return 0;
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 4907ab241d6b..4156387248e4 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -81,14 +81,14 @@ int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
struct ipv6hdr *ip6h;
int len;
int ip6hlen = sizeof(struct ipv6hdr);
-
__u8 *udpdata;
__be32 *udpdata32;
- __u16 encap_type = up->encap_type;
+ u16 encap_type;
if (skb->protocol == htons(ETH_P_IP))
return xfrm4_udp_encap_rcv(sk, skb);
+ encap_type = READ_ONCE(up->encap_type);
/* if this is not encapsulated socket, then just return now */
if (!encap_type)
return 1;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 03608d3ded4b..8d21ff25f160 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1139,9 +1139,9 @@ static void l2tp_tunnel_destruct(struct sock *sk)
switch (tunnel->encap) {
case L2TP_ENCAPTYPE_UDP:
/* No longer an encapsulation socket. See net/ipv4/udp.c */
- (udp_sk(sk))->encap_type = 0;
- (udp_sk(sk))->encap_rcv = NULL;
- (udp_sk(sk))->encap_destroy = NULL;
+ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ udp_sk(sk)->encap_rcv = NULL;
+ udp_sk(sk)->encap_destroy = NULL;
break;
case L2TP_ENCAPTYPE_IP:
break;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index f2ae03c40473..25ca89f80414 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -37,12 +37,6 @@
/* via netdev_priv() */
struct l2tp_eth {
struct l2tp_session *session;
- atomic_long_t tx_bytes;
- atomic_long_t tx_packets;
- atomic_long_t tx_dropped;
- atomic_long_t rx_bytes;
- atomic_long_t rx_packets;
- atomic_long_t rx_errors;
};
/* via l2tp_session_priv() */
@@ -79,10 +73,10 @@ static netdev_tx_t l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev
int ret = l2tp_xmit_skb(session, skb);
if (likely(ret == NET_XMIT_SUCCESS)) {
- atomic_long_add(len, &priv->tx_bytes);
- atomic_long_inc(&priv->tx_packets);
+ DEV_STATS_ADD(dev, tx_bytes, len);
+ DEV_STATS_INC(dev, tx_packets);
} else {
- atomic_long_inc(&priv->tx_dropped);
+ DEV_STATS_INC(dev, tx_dropped);
}
return NETDEV_TX_OK;
}
@@ -90,14 +84,12 @@ static netdev_tx_t l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev
static void l2tp_eth_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct l2tp_eth *priv = netdev_priv(dev);
-
- stats->tx_bytes = (unsigned long)atomic_long_read(&priv->tx_bytes);
- stats->tx_packets = (unsigned long)atomic_long_read(&priv->tx_packets);
- stats->tx_dropped = (unsigned long)atomic_long_read(&priv->tx_dropped);
- stats->rx_bytes = (unsigned long)atomic_long_read(&priv->rx_bytes);
- stats->rx_packets = (unsigned long)atomic_long_read(&priv->rx_packets);
- stats->rx_errors = (unsigned long)atomic_long_read(&priv->rx_errors);
+ stats->tx_bytes = DEV_STATS_READ(dev, tx_bytes);
+ stats->tx_packets = DEV_STATS_READ(dev, tx_packets);
+ stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
+ stats->rx_bytes = DEV_STATS_READ(dev, rx_bytes);
+ stats->rx_packets = DEV_STATS_READ(dev, rx_packets);
+ stats->rx_errors = DEV_STATS_READ(dev, rx_errors);
}
static const struct net_device_ops l2tp_eth_netdev_ops = {
@@ -126,7 +118,6 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
{
struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
struct net_device *dev;
- struct l2tp_eth *priv;
if (!pskb_may_pull(skb, ETH_HLEN))
goto error;
@@ -144,12 +135,11 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
if (!dev)
goto error_rcu;
- priv = netdev_priv(dev);
if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
- atomic_long_inc(&priv->rx_packets);
- atomic_long_add(data_len, &priv->rx_bytes);
+ DEV_STATS_INC(dev, rx_packets);
+ DEV_STATS_ADD(dev, rx_bytes, data_len);
} else {
- atomic_long_inc(&priv->rx_errors);
+ DEV_STATS_INC(dev, rx_errors);
}
rcu_read_unlock();
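
The l2tp_eth hunks delete six driver-private atomic counters because struct net_device already carries core stats that the DEV_STATS_INC()/DEV_STATS_ADD()/DEV_STATS_READ() helpers update and read atomically. A rough userspace model of the pattern (field layout simplified; the kernel versions live in include/linux/netdevice.h):

#include <stdatomic.h>
#include <stdio.h>

/* toy model of struct net_device's core stats fields */
struct toy_net_device {
	struct {
		_Atomic long tx_packets;
		_Atomic long tx_bytes;
		_Atomic long tx_dropped;
	} stats;
};

#define DEV_STATS_INC(dev, field)      atomic_fetch_add(&(dev)->stats.field, 1)
#define DEV_STATS_ADD(dev, field, val) atomic_fetch_add(&(dev)->stats.field, (val))
#define DEV_STATS_READ(dev, field)     atomic_load(&(dev)->stats.field)

int main(void)
{
	struct toy_net_device dev = { 0 };

	/* xmit path, as in l2tp_eth_dev_xmit() after the patch */
	DEV_STATS_ADD(&dev, tx_bytes, 1500);
	DEV_STATS_INC(&dev, tx_packets);

	/* the get_stats64 path just reads the same counters back */
	printf("tx: %ld pkts, %ld bytes, %ld dropped\n",
	       DEV_STATS_READ(&dev, tx_packets),
	       DEV_STATS_READ(&dev, tx_bytes),
	       DEV_STATS_READ(&dev, tx_dropped));
	return 0;
}

The net effect in the diff is a smaller netdev_priv() and a get_stats64 handler that is pure bookkeeping.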
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 11f3d375cec0..bb373e249237 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -431,7 +431,7 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
return -ENOTCONN;
lsa->l2tp_conn_id = lsk->peer_conn_id;
lsa->l2tp_addr = sk->sk_v6_daddr;
- if (np->sndflow)
+ if (inet6_test_bit(SNDFLOW, sk))
lsa->l2tp_flowinfo = np->flow_label;
} else {
if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
@@ -528,7 +528,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
return -EAFNOSUPPORT;
daddr = &lsa->l2tp_addr;
- if (np->sndflow) {
+ if (inet6_test_bit(SNDFLOW, sk)) {
fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
@@ -620,7 +620,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
if (ipc6.dontfrag < 0)
- ipc6.dontfrag = np->dontfrag;
+ ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
if (msg->msg_flags & MSG_CONFIRM)
goto do_confirm;
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index a4af3b7675ef..c3a60fd19c37 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -309,7 +309,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
/* IEEE802.11ac-2013 Table E-4 */
- u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
+ static const u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
struct cfg80211_chan_def uc = sta->tdls_chandef;
enum nl80211_chan_width max_width =
ieee80211_sta_cap_chan_bw(&sta->deflink);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 8260202c0066..18ce624bfde2 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -89,7 +89,7 @@ static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname, in
sock_valbool_flag(ssk, SOCK_KEEPOPEN, !!val);
break;
case SO_PRIORITY:
- ssk->sk_priority = val;
+ WRITE_ONCE(ssk->sk_priority, val);
break;
case SO_SNDBUF:
case SO_SNDBUFFORCE:
@@ -734,11 +734,11 @@ static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname,
lock_sock(sk);
sockopt_seq_inc(msk);
- val = inet_sk(sk)->tos;
+ val = READ_ONCE(inet_sk(sk)->tos);
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- __ip_sock_set_tos(ssk, val);
+ ip_sock_set_tos(ssk, val);
}
release_sock(sk);
@@ -1343,7 +1343,7 @@ static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname,
switch (optname) {
case IP_TOS:
- return mptcp_put_int_option(msk, optval, optlen, inet_sk(sk)->tos);
+ return mptcp_put_int_option(msk, optval, optlen, READ_ONCE(inet_sk(sk)->tos));
}
return -EOPNOTSUPP;
@@ -1411,7 +1411,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
ssk->sk_ipv6only = sk->sk_ipv6only;
- __ip_sock_set_tos(ssk, inet_sk(sk)->tos);
+ ip_sock_set_tos(ssk, inet_sk(sk)->tos);
if (sk->sk_userlocks & tx_rx_locks) {
ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 4174076c66fa..eaf9f2ed0067 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1298,17 +1298,13 @@ static void set_sock_size(struct sock *sk, int mode, int val)
static void set_mcast_loop(struct sock *sk, u_char loop)
{
/* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */
- lock_sock(sk);
inet_assign_bit(MC_LOOP, sk, loop);
#ifdef CONFIG_IP_VS_IPV6
- if (sk->sk_family == AF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
-
+ if (READ_ONCE(sk->sk_family) == AF_INET6) {
/* IPV6_MULTICAST_LOOP */
- np->mc_loop = loop ? 1 : 0;
+ inet6_assign_bit(MC6_LOOP, sk, loop);
}
#endif
- release_sock(sk);
}
/*
@@ -1320,13 +1316,13 @@ static void set_mcast_ttl(struct sock *sk, u_char ttl)
/* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */
lock_sock(sk);
- inet->mc_ttl = ttl;
+ WRITE_ONCE(inet->mc_ttl, ttl);
#ifdef CONFIG_IP_VS_IPV6
if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
/* IPV6_MULTICAST_HOPS */
- np->mcast_hops = ttl;
+ WRITE_ONCE(np->mcast_hops, ttl);
}
#endif
release_sock(sk);
@@ -1339,13 +1335,13 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
/* setsockopt(sock, SOL_IP, IP_MTU_DISCOVER, &val, sizeof(val)); */
lock_sock(sk);
- inet->pmtudisc = val;
+ WRITE_ONCE(inet->pmtudisc, val);
#ifdef CONFIG_IP_VS_IPV6
if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
/* IPV6_MTU_DISCOVER */
- np->pmtudisc = val;
+ WRITE_ONCE(np->pmtudisc, val);
}
#endif
release_sock(sk);
diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
index 48cc60084d28..5a049740758f 100644
--- a/net/netfilter/nf_nat_proto.c
+++ b/net/netfilter/nf_nat_proto.c
@@ -697,6 +697,31 @@ static int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int
}
#endif
+static bool nf_nat_inet_port_was_mangled(const struct sk_buff *skb, __be16 sport)
+{
+ enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_dir dir;
+ const struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return false;
+
+ switch (nf_ct_protonum(ct)) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ break;
+ default:
+ return false;
+ }
+
+ dir = CTINFO2DIR(ctinfo);
+ if (dir != IP_CT_DIR_ORIGINAL)
+ return false;
+
+ return ct->tuplehash[!dir].tuple.dst.u.all != sport;
+}
+
static unsigned int
nf_nat_ipv4_local_in(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -707,8 +732,20 @@ nf_nat_ipv4_local_in(void *priv, struct sk_buff *skb,
ret = nf_nat_ipv4_fn(priv, skb, state);
- if (ret == NF_ACCEPT && sk && saddr != ip_hdr(skb)->saddr &&
- !inet_sk_transparent(sk))
+ if (ret != NF_ACCEPT || !sk || inet_sk_transparent(sk))
+ return ret;
+
+ /* skb has a socket assigned via tcp edemux. We need to check
+ * if nf_nat_ipv4_fn() has mangled the packet in a way that
+ * edemux would not have found this socket.
+ *
+ * This includes both changes to the source address and changes
+ * to the source port, which are both handled by the
+ * nf_nat_ipv4_fn() call above -- long after tcp/udp early demux
+ * might have found a socket for the old (pre-snat) address.
+ */
+ if (saddr != ip_hdr(skb)->saddr ||
+ nf_nat_inet_port_was_mangled(skb, sk->sk_dport))
skb_orphan(skb); /* TCP edemux obtained wrong socket */
return ret;
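
Taken together, the new helper and the rewritten LOCAL_IN hook enforce one invariant: a socket attached by TCP/UDP early demux is only trustworthy if SNAT rewrote neither the source address nor the source port afterwards. A hedged standalone model of the port comparison, with the conntrack tuple heavily simplified:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* toy conntrack tuple: only the one field the check needs */
struct toy_tuple { uint16_t dst_port; };

struct toy_conn {
	/* [0] = original direction, [1] = reply direction */
	struct toy_tuple tuplehash[2];
};

/* Mirrors nf_nat_inet_port_was_mangled(): for an ORIGINAL-direction
 * packet, compare the reply tuple's destination port with the source
 * port the early-demux socket lookup used; a difference means NAT
 * rewrote the port after the socket was attached.
 */
static bool port_was_mangled(const struct toy_conn *ct, int dir,
			     uint16_t sport_seen_by_edemux)
{
	if (dir != 0)	/* only the original direction is interesting */
		return false;
	return ct->tuplehash[!dir].dst_port != sport_seen_by_edemux;
}

int main(void)
{
	struct toy_conn ct = {
		.tuplehash = { { .dst_port = 40000 }, { .dst_port = 40001 } },
	};

	/* prints 1: SNAT moved the port, so the cached socket is stale */
	printf("mangled=%d\n", port_was_mangled(&ct, 0, 40000));
	return 0;
}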
@@ -938,6 +975,27 @@ nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
}
static unsigned int
+nf_nat_ipv6_local_in(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct in6_addr saddr = ipv6_hdr(skb)->saddr;
+ struct sock *sk = skb->sk;
+ unsigned int ret;
+
+ ret = nf_nat_ipv6_fn(priv, skb, state);
+
+ if (ret != NF_ACCEPT || !sk || inet_sk_transparent(sk))
+ return ret;
+
+ /* see nf_nat_ipv4_local_in */
+ if (ipv6_addr_cmp(&saddr, &ipv6_hdr(skb)->saddr) ||
+ nf_nat_inet_port_was_mangled(skb, sk->sk_dport))
+ skb_orphan(skb);
+
+ return ret;
+}
+
+static unsigned int
nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
@@ -1051,7 +1109,7 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
},
/* After packet filtering, change source */
{
- .hook = nf_nat_ipv6_fn,
+ .hook = nf_nat_ipv6_local_in,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC,
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index a72b6aeefb1b..b4405db710b0 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3316,7 +3316,7 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
[NFTA_RULE_CHAIN] = { .type = NLA_STRING,
.len = NFT_CHAIN_MAXNAMELEN - 1 },
[NFTA_RULE_HANDLE] = { .type = NLA_U64 },
- [NFTA_RULE_EXPRESSIONS] = { .type = NLA_NESTED },
+ [NFTA_RULE_EXPRESSIONS] = NLA_POLICY_NESTED_ARRAY(nft_expr_policy),
[NFTA_RULE_COMPAT] = { .type = NLA_NESTED },
[NFTA_RULE_POSITION] = { .type = NLA_U64 },
[NFTA_RULE_USERDATA] = { .type = NLA_BINARY,
@@ -4254,12 +4254,16 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
[NFTA_SET_OBJ_TYPE] = { .type = NLA_U32 },
[NFTA_SET_HANDLE] = { .type = NLA_U64 },
[NFTA_SET_EXPR] = { .type = NLA_NESTED },
- [NFTA_SET_EXPRESSIONS] = { .type = NLA_NESTED },
+ [NFTA_SET_EXPRESSIONS] = NLA_POLICY_NESTED_ARRAY(nft_expr_policy),
+};
+
+static const struct nla_policy nft_concat_policy[NFTA_SET_FIELD_MAX + 1] = {
+ [NFTA_SET_FIELD_LEN] = { .type = NLA_U32 },
};
static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
[NFTA_SET_DESC_SIZE] = { .type = NLA_U32 },
- [NFTA_SET_DESC_CONCAT] = { .type = NLA_NESTED },
+ [NFTA_SET_DESC_CONCAT] = NLA_POLICY_NESTED_ARRAY(nft_concat_policy),
};
static struct nft_set *nft_set_lookup(const struct nft_table *table,
@@ -4695,8 +4699,10 @@ static int nf_tables_getset(struct sk_buff *skb, const struct nfnl_info *info,
return -EINVAL;
set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask);
- if (IS_ERR(set))
+ if (IS_ERR(set)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
return PTR_ERR(set);
+ }
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb2 == NULL)
@@ -4713,10 +4719,6 @@ err_fill_set_info:
return err;
}
-static const struct nla_policy nft_concat_policy[NFTA_SET_FIELD_MAX + 1] = {
- [NFTA_SET_FIELD_LEN] = { .type = NLA_U32 },
-};
-
static int nft_set_desc_concat_parse(const struct nlattr *attr,
struct nft_set_desc *desc)
{
@@ -5498,7 +5500,7 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
[NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING,
.len = NFT_OBJ_MAXNAMELEN - 1 },
[NFTA_SET_ELEM_KEY_END] = { .type = NLA_NESTED },
- [NFTA_SET_ELEM_EXPRESSIONS] = { .type = NLA_NESTED },
+ [NFTA_SET_ELEM_EXPRESSIONS] = NLA_POLICY_NESTED_ARRAY(nft_expr_policy),
};
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
@@ -5506,7 +5508,7 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX +
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING,
.len = NFT_SET_MAXNAMELEN - 1 },
- [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED },
+ [NFTA_SET_ELEM_LIST_ELEMENTS] = NLA_POLICY_NESTED_ARRAY(nft_set_elem_policy),
[NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
};
@@ -6025,8 +6027,10 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
}
set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
- if (IS_ERR(set))
+ if (IS_ERR(set)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_SET]);
return PTR_ERR(set);
+ }
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
@@ -6919,8 +6923,10 @@ static int nf_tables_newsetelem(struct sk_buff *skb,
set = nft_set_lookup_global(net, table, nla[NFTA_SET_ELEM_LIST_SET],
nla[NFTA_SET_ELEM_LIST_SET_ID], genmask);
- if (IS_ERR(set))
+ if (IS_ERR(set)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_SET]);
return PTR_ERR(set);
+ }
if (!list_empty(&set->bindings) &&
(set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
@@ -7195,8 +7201,10 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
}
set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
- if (IS_ERR(set))
+ if (IS_ERR(set)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_SET]);
return PTR_ERR(set);
+ }
if (nft_set_is_anonymous(set))
return -EOPNOTSUPP;
@@ -8692,6 +8700,7 @@ static int nf_tables_getflowtable(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
+ struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
u8 family = info->nfmsg->nfgen_family;
struct nft_flowtable *flowtable;
@@ -8717,13 +8726,17 @@ static int nf_tables_getflowtable(struct sk_buff *skb,
table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family,
genmask, 0);
- if (IS_ERR(table))
+ if (IS_ERR(table)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_TABLE]);
return PTR_ERR(table);
+ }
flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
genmask);
- if (IS_ERR(flowtable))
+ if (IS_ERR(flowtable)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);
return PTR_ERR(flowtable);
+ }
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb2)
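
All four lookup sites in nf_tables_api.c gain the same two lines: on failure, point the extended ack at the attribute that named the missing object, so userspace gets a precise cursor instead of a bare errno. The macro itself is tiny; a hedged model (the real struct netlink_ext_ack also carries a message string, cookie and policy):

#include <stdio.h>

struct toy_nlattr { unsigned short nla_len, nla_type; };

/* hedged model of struct netlink_ext_ack */
struct toy_ext_ack { const struct toy_nlattr *bad_attr; };

#define TOY_SET_BAD_ATTR(extack, attr)			\
	do {						\
		if (extack)				\
			(extack)->bad_attr = (attr);	\
	} while (0)

int main(void)
{
	struct toy_nlattr name_attr = { .nla_len = 8, .nla_type = 2 };
	struct toy_ext_ack ack = { 0 };

	/* lookup failed: record which attribute named the missing set */
	TOY_SET_BAD_ATTR(&ack, &name_attr);
	printf("bad attr type=%u\n", ack.bad_attr->nla_type);
	return 0;
}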
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 96e91ab71573..0eed00184adf 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -487,7 +487,7 @@ static struct sock *nr_make_new(struct sock *osk)
sock_init_data(NULL, sk);
sk->sk_type = osk->sk_type;
- sk->sk_priority = osk->sk_priority;
+ sk->sk_priority = READ_ONCE(osk->sk_priority);
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index fd66014d8a76..6fcd7e2ca81f 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -311,11 +311,18 @@ static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
return 0;
}
-static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
- const struct nshhdr *nh)
+static noinline_for_stack int push_nsh(struct sk_buff *skb,
+ struct sw_flow_key *key,
+ const struct nlattr *a)
{
+ u8 buffer[NSH_HDR_MAX_LEN];
+ struct nshhdr *nh = (struct nshhdr *)buffer;
int err;
+ err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
+ if (err)
+ return err;
+
err = nsh_push(skb, nh);
if (err)
return err;
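
The openvswitch change moves the NSH_HDR_MAX_LEN scratch buffer (a few hundred bytes) out of do_execute_actions(), which can be entered recursively, and into push_nsh(), marked noinline_for_stack so the compiler cannot inline the buffer right back into the caller's frame. In the kernel that attribute is simply a spelling of noinline; a hedged standalone illustration:

#include <stdio.h>
#include <string.h>

#define noinline_for_stack __attribute__((__noinline__))

static noinline_for_stack int consume_big_buffer(const char *src, size_t n)
{
	char scratch[256];	/* lives only in this helper's frame */

	if (n > sizeof(scratch))
		return -1;
	memcpy(scratch, src, n);
	return scratch[0];
}

int main(void)
{
	const char msg[] = "nsh";

	/* the caller's frame stays small no matter how deep it recurses */
	printf("%d\n", consume_big_buffer(msg, sizeof(msg)));
	return 0;
}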
@@ -873,7 +880,7 @@ static void ovs_fragment(struct net *net, struct vport *vport,
prepare_frag(vport, skb, orig_network_offset,
ovs_key_mac_proto(key));
- dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
+ dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
DST_OBSOLETE_NONE, DST_NOCOUNT);
ovs_rt.dst.dev = vport->dev;
@@ -890,7 +897,7 @@ static void ovs_fragment(struct net *net, struct vport *vport,
prepare_frag(vport, skb, orig_network_offset,
ovs_key_mac_proto(key));
memset(&ovs_rt, 0, sizeof(ovs_rt));
- dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
+ dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
DST_OBSOLETE_NONE, DST_NOCOUNT);
ovs_rt.dst.dev = vport->dev;
@@ -1439,17 +1446,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
err = pop_eth(skb, key);
break;
- case OVS_ACTION_ATTR_PUSH_NSH: {
- u8 buffer[NSH_HDR_MAX_LEN];
- struct nshhdr *nh = (struct nshhdr *)buffer;
-
- err = nsh_hdr_from_nlattr(nla_data(a), nh,
- NSH_HDR_MAX_LEN);
- if (unlikely(err))
- break;
- err = push_nsh(skb, key, nh);
+ case OVS_ACTION_ATTR_PUSH_NSH:
+ err = push_nsh(skb, key, nla_data(a));
break;
- }
case OVS_ACTION_ATTR_POP_NSH:
err = pop_nsh(skb, key);
diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h
index 0c33889a8515..ed11cd12b512 100644
--- a/net/openvswitch/meter.h
+++ b/net/openvswitch/meter.h
@@ -39,13 +39,13 @@ struct dp_meter {
u32 max_delta_t;
u64 used;
struct ovs_flow_stats stats;
- struct dp_meter_band bands[];
+ struct dp_meter_band bands[] __counted_by(n_bands);
};
struct dp_meter_instance {
struct rcu_head rcu;
u32 n_meters;
- struct dp_meter __rcu *dp_meters[];
+ struct dp_meter __rcu *dp_meters[] __counted_by(n_meters);
};
struct dp_meter_table {
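
__counted_by(n_bands) and __counted_by(n_meters) tell the compiler which member holds the runtime length of the flexible array, so FORTIFY_SOURCE and UBSAN bounds checking can trap out-of-bounds element accesses instead of letting them corrupt memory. A minimal sketch with a fallback for compilers lacking the attribute (the guard macro is illustrative):

#include <stdio.h>
#include <stdlib.h>

#if defined(__has_attribute)
# if __has_attribute(__counted_by__)
#  define __counted_by(member) __attribute__((__counted_by__(member)))
# endif
#endif
#ifndef __counted_by
# define __counted_by(member)	/* older compilers: annotation is a no-op */
#endif

struct toy_meter_instance {
	unsigned int n_meters;
	void *dp_meters[] __counted_by(n_meters);
};

int main(void)
{
	unsigned int n = 4;
	struct toy_meter_instance *ti =
		malloc(sizeof(*ti) + n * sizeof(ti->dp_meters[0]));

	if (!ti)
		return 1;
	ti->n_meters = n;	/* must be set before the array is used */
	ti->dp_meters[0] = NULL;
	printf("meters=%u\n", ti->n_meters);
	free(ti);
	return 0;
}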
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 49dafe9ac72f..0cc5a4e19900 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -583,7 +583,7 @@ static struct sock *rose_make_new(struct sock *osk)
#endif
sk->sk_type = osk->sk_type;
- sk->sk_priority = osk->sk_priority;
+ sk->sk_priority = READ_ONCE(osk->sk_priority);
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 1e20bbd687f1..1424bfeaca73 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -375,9 +375,9 @@ out:
static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
[TCA_ROUTE4_CLASSID] = { .type = NLA_U32 },
- [TCA_ROUTE4_TO] = { .type = NLA_U32 },
- [TCA_ROUTE4_FROM] = { .type = NLA_U32 },
- [TCA_ROUTE4_IIF] = { .type = NLA_U32 },
+ [TCA_ROUTE4_TO] = NLA_POLICY_MAX(NLA_U32, 0xFF),
+ [TCA_ROUTE4_FROM] = NLA_POLICY_MAX(NLA_U32, 0xFF),
+ [TCA_ROUTE4_IIF] = NLA_POLICY_MAX(NLA_U32, 0x7FFF),
};
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
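
Encoding the 0xFF and 0x7FFF bounds in route4_policy lets the generic netlink parser reject out-of-range values before route4_set_parms() ever runs, which is why the open-coded range checks in the next hunk can simply be deleted, and why violations now come with attribute-level extack reporting for free. A hedged model of table-driven range validation (the real struct nla_policy packs this differently):

#include <stdint.h>
#include <stdio.h>

/* toy policy entry: inclusive upper bound, 0 = unbounded */
struct toy_policy { uint32_t max; };

static const struct toy_policy route4_policy_model[] = {
	[0] = { .max = 0 },	/* CLASSID: any u32 */
	[1] = { .max = 0xFF },	/* TO */
	[2] = { .max = 0xFF },	/* FROM */
	[3] = { .max = 0x7FFF },/* IIF */
};

static int toy_validate(unsigned int type, uint32_t val)
{
	const struct toy_policy *p = &route4_policy_model[type];

	if (p->max && val > p->max)
		return -34;	/* -ERANGE: rejected before the handler runs */
	return 0;
}

int main(void)
{
	/* prints "0 -34": 0xFF is in range for TO, 0x8000 is not for IIF */
	printf("%d %d\n", toy_validate(1, 0xFF), toy_validate(3, 0x8000));
	return 0;
}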
@@ -397,33 +397,37 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
return err;
if (tb[TCA_ROUTE4_TO]) {
- if (new && handle & 0x8000)
+ if (new && handle & 0x8000) {
+ NL_SET_ERR_MSG(extack, "Invalid handle");
return -EINVAL;
+ }
to = nla_get_u32(tb[TCA_ROUTE4_TO]);
- if (to > 0xFF)
- return -EINVAL;
nhandle = to;
}
+ if (tb[TCA_ROUTE4_FROM] && tb[TCA_ROUTE4_IIF]) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_ROUTE4_FROM],
+ "'from' and 'fromif' are mutually exclusive");
+ return -EINVAL;
+ }
+
if (tb[TCA_ROUTE4_FROM]) {
- if (tb[TCA_ROUTE4_IIF])
- return -EINVAL;
id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
- if (id > 0xFF)
- return -EINVAL;
nhandle |= id << 16;
} else if (tb[TCA_ROUTE4_IIF]) {
id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
- if (id > 0x7FFF)
- return -EINVAL;
nhandle |= (id | 0x8000) << 16;
} else
nhandle |= 0xFFFF << 16;
if (handle && new) {
nhandle |= handle & 0x7F00;
- if (nhandle != handle)
+ if (nhandle != handle) {
+			NL_SET_ERR_MSG_FMT(extack,
+					   "Handle mismatch constructed: %x (expected: %x)",
+					   nhandle, handle);
return -EINVAL;
+ }
}
if (!nhandle) {
@@ -478,7 +482,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
struct route4_filter __rcu **fp;
struct route4_filter *fold, *f1, *pfp, *f = NULL;
struct route4_bucket *b;
- struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_ROUTE4_MAX + 1];
unsigned int h, th;
int err;
@@ -489,10 +492,12 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
return -EINVAL;
}
- if (opt == NULL)
+ if (NL_REQ_ATTR_CHECK(extack, NULL, tca, TCA_OPTIONS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing options");
return -EINVAL;
+ }
- err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt,
+ err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, tca[TCA_OPTIONS],
route4_policy, NULL);
if (err < 0)
return err;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index da34fd4c9269..09d8afd04a2a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -546,7 +546,7 @@ META_COLLECTOR(int_sk_prio)
*err = -1;
return;
}
- dst->value = sk->sk_priority;
+ dst->value = READ_ONCE(sk->sk_priority);
}
META_COLLECTOR(int_sk_rcvlowat)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index f59a2cb2c803..8eacdb54e72f 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -2,7 +2,7 @@
/*
* net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
*
- * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
+ * Copyright (C) 2013-2023 Eric Dumazet <edumazet@google.com>
*
* Meant to be mostly used for locally generated traffic :
* Fast classification depends on skb->sk being set before reaching us.
@@ -51,7 +51,8 @@
#include <net/tcp.h>
struct fq_skb_cb {
- u64 time_to_send;
+ u64 time_to_send;
+ u8 band;
};
static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
@@ -73,37 +74,41 @@ struct fq_flow {
struct sk_buff *tail; /* last skb in the list */
unsigned long age; /* (jiffies | 1UL) when flow was emptied, for gc */
};
- struct rb_node fq_node; /* anchor in fq_root[] trees */
+ union {
+ struct rb_node fq_node; /* anchor in fq_root[] trees */
+ /* Following field is only used for q->internal,
+ * because q->internal is not hashed in fq_root[]
+ */
+ u64 stat_fastpath_packets;
+ };
struct sock *sk;
u32 socket_hash; /* sk_hash */
int qlen; /* number of packets in flow queue */
-/* Second cache line, used in fq_dequeue() */
+/* Second cache line */
int credit;
- /* 32bit hole on 64bit arches */
-
+ int band;
struct fq_flow *next; /* next pointer in RR lists */
struct rb_node rate_node; /* anchor in q->delayed tree */
u64 time_next_packet;
-} ____cacheline_aligned_in_smp;
+};
struct fq_flow_head {
struct fq_flow *first;
struct fq_flow *last;
};
-struct fq_sched_data {
+struct fq_perband_flows {
struct fq_flow_head new_flows;
-
struct fq_flow_head old_flows;
+ int credit;
+	int quantum; /* based on band nr: 576KB, 192KB, 64KB */
+};
- struct rb_root delayed; /* for rate limited flows */
- u64 time_next_delayed_flow;
- u64 ktime_cache; /* copy of last ktime_get_ns() */
- unsigned long unthrottle_latency_ns;
+struct fq_sched_data {
+/* Read mostly cache line */
- struct fq_flow internal; /* for non classified or high prio packets */
u32 quantum;
u32 initial_quantum;
u32 flow_refill_delay;
@@ -117,24 +122,46 @@ struct fq_sched_data {
u8 rate_enable;
u8 fq_trees_log;
u8 horizon_drop;
+ u8 prio2band[(TC_PRIO_MAX + 1) >> 2];
+ u32 timer_slack; /* hrtimer slack in ns */
+
+/* Read/Write fields. */
+
+ unsigned int band_nr; /* band being serviced in fq_dequeue() */
+
+ struct fq_perband_flows band_flows[FQ_BANDS];
+
+ struct fq_flow internal; /* fastpath queue. */
+ struct rb_root delayed; /* for rate limited flows */
+ u64 time_next_delayed_flow;
+ unsigned long unthrottle_latency_ns;
+
+ u32 band_pkt_count[FQ_BANDS];
u32 flows;
- u32 inactive_flows;
+ u32 inactive_flows; /* Flows with no packet to send. */
u32 throttled_flows;
- u64 stat_gc_flows;
- u64 stat_internal_packets;
u64 stat_throttled;
+ struct qdisc_watchdog watchdog;
+ u64 stat_gc_flows;
+
+/* Seldom used fields. */
+
+ u64 stat_band_drops[FQ_BANDS];
u64 stat_ce_mark;
u64 stat_horizon_drops;
u64 stat_horizon_caps;
u64 stat_flows_plimit;
u64 stat_pkts_too_long;
u64 stat_allocation_errors;
-
- u32 timer_slack; /* hrtimer slack in ns */
- struct qdisc_watchdog watchdog;
};
+/* return the i-th 2-bit value ("crumb") */
+static u8 fq_prio2band(const u8 *prio2band, unsigned int prio)
+{
+ return (prio2band[prio / 4] >> (2 * (prio & 0x3))) & 0x3;
+}
+
/*
* f->tail and f->age share the same location.
* We can use the low order bit to differentiate if this location points
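
The priomap is stored as "crumbs": sixteen possible TC_PRIO values, two bits each, packed into four bytes, so the per-packet band lookup in fq_enqueue() touches a single byte. A worked, runnable example of the extraction fq_prio2band() performs (map contents illustrative):

#include <stdio.h>

/* same crumb extraction as fq_prio2band() above */
static unsigned char crumb(const unsigned char *map, unsigned int prio)
{
	return (map[prio / 4] >> (2 * (prio & 0x3))) & 0x3;
}

int main(void)
{
	/* bands 1,2,2,2 packed crumb-first: 1 | 2<<2 | 2<<4 | 2<<6 = 0xa9 */
	const unsigned char map[4] = { 0xa9, 0x00, 0x55, 0xaa };
	unsigned int prio;

	for (prio = 0; prio < 16; prio++)
		printf("prio %2u -> band %u\n", prio, crumb(map, prio));
	return 0;
}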
@@ -159,8 +186,19 @@ static bool fq_flow_is_throttled(const struct fq_flow *f)
return f->next == &throttled;
}
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+enum new_flow {
+ NEW_FLOW,
+ OLD_FLOW
+};
+
+static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow,
+ enum new_flow list_sel)
{
+ struct fq_perband_flows *pband = &q->band_flows[flow->band];
+ struct fq_flow_head *head = (list_sel == NEW_FLOW) ?
+ &pband->new_flows :
+ &pband->old_flows;
+
if (head->first)
head->last->next = flow;
else
@@ -173,7 +211,7 @@ static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
rb_erase(&f->rate_node, &q->delayed);
q->throttled_flows--;
- fq_flow_add_tail(&q->old_flows, f);
+ fq_flow_add_tail(q, f, OLD_FLOW);
}
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
@@ -258,17 +296,61 @@ static void fq_gc(struct fq_sched_data *q,
kmem_cache_free_bulk(fq_flow_cachep, fcnt, tofree);
}
-static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
+/* Fast path can be used if:
+ * 1) Packet tstamp is in the past.
+ * 2) FQ qlen == 0 OR
+ *    (no flow is currently eligible for transmit,
+ *     AND fast path queue has less than 8 packets)
+ * 3) No SO_MAX_PACING_RATE on the socket (if any).
+ * 4) No @maxrate attribute on this qdisc.
+ *
+ * FQ cannot use the generic TCQ_F_CAN_BYPASS infrastructure.
+ */
+static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
+ u64 now)
{
+ const struct fq_sched_data *q = qdisc_priv(sch);
+ const struct sock *sk;
+
+ if (fq_skb_cb(skb)->time_to_send > now)
+ return false;
+
+ if (sch->q.qlen != 0) {
+ /* Even if some packets are stored in this qdisc,
+ * we can still enable fast path if all of them are
+ * scheduled in the future (ie no flows are eligible)
+ * or in the fast path queue.
+ */
+ if (q->flows != q->inactive_flows + q->throttled_flows)
+ return false;
+
+		/* Do not allow the fast path queue to explode; we want Fair Queue mode
+ * under pressure.
+ */
+ if (q->internal.qlen >= 8)
+ return false;
+ }
+
+ sk = skb->sk;
+ if (sk && sk_fullsock(sk) && !sk_is_tcp(sk) &&
+ sk->sk_max_pacing_rate != ~0UL)
+ return false;
+
+ if (q->flow_max_rate != ~0UL)
+ return false;
+
+ return true;
+}
+
+static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
+ u64 now)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
struct rb_node **p, *parent;
struct sock *sk = skb->sk;
struct rb_root *root;
struct fq_flow *f;
- /* warning: no starvation prevention... */
- if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
- return &q->internal;
-
/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
* or a listener (SYNCOOKIE mode)
* 1) request sockets are not full blown,
@@ -299,11 +381,14 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
sk = (struct sock *)((hash << 1) | 1UL);
}
+ if (fq_fastpath_check(sch, skb, now)) {
+ q->internal.stat_fastpath_packets++;
+ return &q->internal;
+ }
+
root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
- if (q->flows >= (2U << q->fq_trees_log) &&
- q->inactive_flows > q->flows/2)
- fq_gc(q, root, sk);
+ fq_gc(q, root, sk);
p = &root->rb_node;
parent = NULL;
@@ -396,7 +481,6 @@ static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
{
fq_erase_head(sch, flow, skb);
skb_mark_not_on_list(skb);
- flow->qlen--;
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
}
@@ -434,9 +518,9 @@ static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
}
static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
- const struct fq_sched_data *q)
+ const struct fq_sched_data *q, u64 now)
{
- return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon));
+ return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
}
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -444,53 +528,57 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
struct fq_sched_data *q = qdisc_priv(sch);
struct fq_flow *f;
+ u64 now;
+ u8 band;
- if (unlikely(sch->q.qlen >= sch->limit))
+ band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX);
+ if (unlikely(q->band_pkt_count[band] >= sch->limit)) {
+ q->stat_band_drops[band]++;
return qdisc_drop(skb, sch, to_free);
+ }
+ now = ktime_get_ns();
if (!skb->tstamp) {
- fq_skb_cb(skb)->time_to_send = q->ktime_cache = ktime_get_ns();
+ fq_skb_cb(skb)->time_to_send = now;
} else {
- /* Check if packet timestamp is too far in the future.
- * Try first if our cached value, to avoid ktime_get_ns()
- * cost in most cases.
- */
- if (fq_packet_beyond_horizon(skb, q)) {
- /* Refresh our cache and check another time */
- q->ktime_cache = ktime_get_ns();
- if (fq_packet_beyond_horizon(skb, q)) {
- if (q->horizon_drop) {
+ /* Check if packet timestamp is too far in the future. */
+ if (fq_packet_beyond_horizon(skb, q, now)) {
+ if (q->horizon_drop) {
q->stat_horizon_drops++;
return qdisc_drop(skb, sch, to_free);
- }
- q->stat_horizon_caps++;
- skb->tstamp = q->ktime_cache + q->horizon;
}
+ q->stat_horizon_caps++;
+ skb->tstamp = now + q->horizon;
}
fq_skb_cb(skb)->time_to_send = skb->tstamp;
}
- f = fq_classify(skb, q);
- if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
- q->stat_flows_plimit++;
- return qdisc_drop(skb, sch, to_free);
- }
+ f = fq_classify(sch, skb, now);
- f->qlen++;
- qdisc_qstats_backlog_inc(sch, skb);
- if (fq_flow_is_detached(f)) {
- fq_flow_add_tail(&q->new_flows, f);
- if (time_after(jiffies, f->age + q->flow_refill_delay))
- f->credit = max_t(u32, f->credit, q->quantum);
- q->inactive_flows--;
+ if (f != &q->internal) {
+ if (unlikely(f->qlen >= q->flow_plimit)) {
+ q->stat_flows_plimit++;
+ return qdisc_drop(skb, sch, to_free);
+ }
+
+ if (fq_flow_is_detached(f)) {
+ fq_flow_add_tail(q, f, NEW_FLOW);
+ if (time_after(jiffies, f->age + q->flow_refill_delay))
+ f->credit = max_t(u32, f->credit, q->quantum);
+ }
+
+ f->band = band;
+ q->band_pkt_count[band]++;
+ fq_skb_cb(skb)->band = band;
+ if (f->qlen == 0)
+ q->inactive_flows--;
}
+ f->qlen++;
/* Note: this overwrites f->age */
flow_queue_add(f, skb);
- if (unlikely(f == &q->internal)) {
- q->stat_internal_packets++;
- }
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -523,13 +611,26 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
}
}
+static struct fq_flow_head *fq_pband_head_select(struct fq_perband_flows *pband)
+{
+ if (pband->credit <= 0)
+ return NULL;
+
+ if (pband->new_flows.first)
+ return &pband->new_flows;
+
+ return pband->old_flows.first ? &pband->old_flows : NULL;
+}
+
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
struct fq_sched_data *q = qdisc_priv(sch);
+ struct fq_perband_flows *pband;
struct fq_flow_head *head;
struct sk_buff *skb;
struct fq_flow *f;
unsigned long rate;
+ int retry;
u32 plen;
u64 now;
@@ -538,30 +639,38 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
skb = fq_peek(&q->internal);
if (unlikely(skb)) {
+ q->internal.qlen--;
fq_dequeue_skb(sch, &q->internal, skb);
goto out;
}
- q->ktime_cache = now = ktime_get_ns();
+ now = ktime_get_ns();
fq_check_throttled(q, now);
+ retry = 0;
+ pband = &q->band_flows[q->band_nr];
begin:
- head = &q->new_flows;
- if (!head->first) {
- head = &q->old_flows;
- if (!head->first) {
- if (q->time_next_delayed_flow != ~0ULL)
- qdisc_watchdog_schedule_range_ns(&q->watchdog,
+ head = fq_pband_head_select(pband);
+ if (!head) {
+ while (++retry < FQ_BANDS) {
+ if (++q->band_nr == FQ_BANDS)
+ q->band_nr = 0;
+ pband = &q->band_flows[q->band_nr];
+ pband->credit = min(pband->credit + pband->quantum,
+ pband->quantum);
+ goto begin;
+ }
+ if (q->time_next_delayed_flow != ~0ULL)
+ qdisc_watchdog_schedule_range_ns(&q->watchdog,
q->time_next_delayed_flow,
q->timer_slack);
- return NULL;
- }
+ return NULL;
}
f = head->first;
-
+ retry = 0;
if (f->credit <= 0) {
f->credit += q->quantum;
head->first = f->next;
- fq_flow_add_tail(&q->old_flows, f);
+ fq_flow_add_tail(q, f, OLD_FLOW);
goto begin;
}
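
Band selection here is a small deficit round robin: each band drains a byte credit, and when the current band has no credit or no eligible flow the scheduler advances to the next one, refilling it with min(credit + quantum, quantum) so an idle band cannot bank extra credit. With the default quanta set in fq_init() further down (9 << 16, 3 << 16 and 1 << 16 bytes, i.e. 576 KiB, 192 KiB and 64 KiB), the three bands approximate a 9:3:1 bandwidth split under load. A hedged sketch of just the rotation logic:

#include <stdio.h>

#define NBANDS 3

struct band { int credit, quantum; };

static int min_int(int a, int b) { return a < b ? a : b; }

/* pick a band to service, rotating and refilling as fq_dequeue() does */
static int next_band(struct band *b, int *cur)
{
	int retry;

	for (retry = 0; retry < NBANDS; retry++) {
		if (b[*cur].credit > 0)
			return *cur;
		*cur = (*cur + 1) % NBANDS;
		/* clamped refill: an idle band cannot bank extra credit */
		b[*cur].credit = min_int(b[*cur].credit + b[*cur].quantum,
					 b[*cur].quantum);
	}
	return -1;	/* every band exhausted this round */
}

int main(void)
{
	struct band b[NBANDS] = {
		{ 9 << 16, 9 << 16 },	/* 576 KiB */
		{ 3 << 16, 3 << 16 },	/* 192 KiB */
		{ 1 << 16, 1 << 16 },	/*  64 KiB */
	};
	int cur = 0, i;

	for (i = 0; i < 6; i++) {
		int band = next_band(b, &cur);

		if (band < 0)
			break;
		printf("serving band %d\n", band);
		b[band].credit -= 256 << 10;	/* pretend a 256 KiB send */
	}
	return 0;
}

Run as-is this serves bands 0, 0, 0, 1, 2, 0: the high band drains first, the lower bands get their turns, then service wraps back around.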
@@ -581,20 +690,23 @@ begin:
INET_ECN_set_ce(skb);
q->stat_ce_mark++;
}
+ if (--f->qlen == 0)
+ q->inactive_flows++;
+ q->band_pkt_count[fq_skb_cb(skb)->band]--;
fq_dequeue_skb(sch, f, skb);
} else {
head->first = f->next;
/* force a pass through old_flows to prevent starvation */
- if ((head == &q->new_flows) && q->old_flows.first) {
- fq_flow_add_tail(&q->old_flows, f);
+ if (head == &pband->new_flows) {
+ fq_flow_add_tail(q, f, OLD_FLOW);
} else {
fq_flow_set_detached(f);
- q->inactive_flows++;
}
goto begin;
}
plen = qdisc_pkt_len(skb);
f->credit -= plen;
+ pband->credit -= plen;
if (!q->rate_enable)
goto out;
@@ -607,7 +719,7 @@ begin:
*/
if (!skb->tstamp) {
if (skb->sk)
- rate = min(skb->sk->sk_pacing_rate, rate);
+ rate = min(READ_ONCE(skb->sk->sk_pacing_rate), rate);
if (rate <= q->low_rate_threshold) {
f->credit = 0;
@@ -686,8 +798,10 @@ static void fq_reset(struct Qdisc *sch)
kmem_cache_free(fq_flow_cachep, f);
}
}
- q->new_flows.first = NULL;
- q->old_flows.first = NULL;
+ for (idx = 0; idx < FQ_BANDS; idx++) {
+ q->band_flows[idx].new_flows.first = NULL;
+ q->band_flows[idx].old_flows.first = NULL;
+ }
q->delayed = RB_ROOT;
q->flows = 0;
q->inactive_flows = 0;
@@ -801,8 +915,77 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_TIMER_SLACK] = { .type = NLA_U32 },
[TCA_FQ_HORIZON] = { .type = NLA_U32 },
[TCA_FQ_HORIZON_DROP] = { .type = NLA_U8 },
+ [TCA_FQ_PRIOMAP] = {
+ .type = NLA_BINARY,
+ .len = sizeof(struct tc_prio_qopt),
+ },
+ [TCA_FQ_WEIGHTS] = {
+ .type = NLA_BINARY,
+ .len = FQ_BANDS * sizeof(s32),
+ },
};
+/* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
+static void fq_prio2band_compress_crumb(const u8 *in, u8 *out)
+{
+ const int num_elems = TC_PRIO_MAX + 1;
+ int i;
+
+ memset(out, 0, num_elems / 4);
+ for (i = 0; i < num_elems; i++)
+ out[i / 4] |= in[i] << (2 * (i & 0x3));
+}
+
+static void fq_prio2band_decompress_crumb(const u8 *in, u8 *out)
+{
+ const int num_elems = TC_PRIO_MAX + 1;
+ int i;
+
+ for (i = 0; i < num_elems; i++)
+ out[i] = fq_prio2band(in, i);
+}
+
+static int fq_load_weights(struct fq_sched_data *q,
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ s32 *weights = nla_data(attr);
+ int i;
+
+ for (i = 0; i < FQ_BANDS; i++) {
+ if (weights[i] < FQ_MIN_WEIGHT) {
+			NL_SET_ERR_MSG_FMT_MOD(extack, "Weight %d less than minimum allowed %d",
+ weights[i], FQ_MIN_WEIGHT);
+ return -EINVAL;
+ }
+ }
+ for (i = 0; i < FQ_BANDS; i++)
+ q->band_flows[i].quantum = weights[i];
+ return 0;
+}
+
+static int fq_load_priomap(struct fq_sched_data *q,
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ const struct tc_prio_qopt *map = nla_data(attr);
+ int i;
+
+ if (map->bands != FQ_BANDS) {
+ NL_SET_ERR_MSG_MOD(extack, "FQ only supports 3 bands");
+ return -EINVAL;
+ }
+ for (i = 0; i < TC_PRIO_MAX + 1; i++) {
+ if (map->priomap[i] >= FQ_BANDS) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "FQ priomap field %d maps to a too high band %d",
+ i, map->priomap[i]);
+ return -EINVAL;
+ }
+ }
+ fq_prio2band_compress_crumb(map->priomap, q->prio2band);
+ return 0;
+}
+
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -877,6 +1060,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
}
+ if (!err && tb[TCA_FQ_PRIOMAP])
+ err = fq_load_priomap(q, tb[TCA_FQ_PRIOMAP], extack);
+
+ if (!err && tb[TCA_FQ_WEIGHTS])
+ err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack);
+
if (tb[TCA_FQ_ORPHAN_MASK])
q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
@@ -928,7 +1117,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct fq_sched_data *q = qdisc_priv(sch);
- int err;
+ int i, err;
sch->limit = 10000;
q->flow_plimit = 100;
@@ -938,8 +1127,13 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
q->flow_max_rate = ~0UL;
q->time_next_delayed_flow = ~0ULL;
q->rate_enable = 1;
- q->new_flows.first = NULL;
- q->old_flows.first = NULL;
+ for (i = 0; i < FQ_BANDS; i++) {
+ q->band_flows[i].new_flows.first = NULL;
+ q->band_flows[i].old_flows.first = NULL;
+ }
+ q->band_flows[0].quantum = 9 << 16;
+ q->band_flows[1].quantum = 3 << 16;
+ q->band_flows[2].quantum = 1 << 16;
q->delayed = RB_ROOT;
q->fq_root = NULL;
q->fq_trees_log = ilog2(1024);
@@ -954,6 +1148,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
/* Default ce_threshold of 4294 seconds */
q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;
+ fq_prio2band_compress_crumb(sch_default_prio2band, q->prio2band);
qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
if (opt)
@@ -968,8 +1163,12 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct fq_sched_data *q = qdisc_priv(sch);
u64 ce_threshold = q->ce_threshold;
+ struct tc_prio_qopt prio = {
+ .bands = FQ_BANDS,
+ };
u64 horizon = q->horizon;
struct nlattr *opts;
+ s32 weights[3];
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (opts == NULL)
@@ -999,6 +1198,16 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u8(skb, TCA_FQ_HORIZON_DROP, q->horizon_drop))
goto nla_put_failure;
+ fq_prio2band_decompress_crumb(q->prio2band, prio.priomap);
+ if (nla_put(skb, TCA_FQ_PRIOMAP, sizeof(prio), &prio))
+ goto nla_put_failure;
+
+ weights[0] = q->band_flows[0].quantum;
+ weights[1] = q->band_flows[1].quantum;
+ weights[2] = q->band_flows[2].quantum;
+ if (nla_put(skb, TCA_FQ_WEIGHTS, sizeof(weights), &weights))
+ goto nla_put_failure;
+
return nla_nest_end(skb, opts);
nla_put_failure:
@@ -1009,11 +1218,15 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct fq_sched_data *q = qdisc_priv(sch);
struct tc_fq_qd_stats st;
+ int i;
+
+ st.pad = 0;
sch_tree_lock(sch);
st.gc_flows = q->stat_gc_flows;
- st.highprio_packets = q->stat_internal_packets;
+ st.highprio_packets = 0;
+ st.fastpath_packets = q->internal.stat_fastpath_packets;
st.tcp_retrans = 0;
st.throttled = q->stat_throttled;
st.flows_plimit = q->stat_flows_plimit;
@@ -1029,6 +1242,10 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
st.ce_mark = q->stat_ce_mark;
st.horizon_drops = q->stat_horizon_drops;
st.horizon_caps = q->stat_horizon_caps;
+ for (i = 0; i < FQ_BANDS; i++) {
+ st.band_drops[i] = q->stat_band_drops[i];
+ st.band_pkt_count[i] = q->band_pkt_count[i];
+ }
sch_tree_unlock(sch);
return gnet_stats_copy_app(d, &st, sizeof(st));
@@ -1056,7 +1273,7 @@ static int __init fq_module_init(void)
fq_flow_cachep = kmem_cache_create("fq_flow_cache",
sizeof(struct fq_flow),
- 0, 0, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!fq_flow_cachep)
return -ENOMEM;
diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
index a9bd0a235890..ce63414185fd 100644
--- a/net/sched/sch_frag.c
+++ b/net/sched/sch_frag.c
@@ -96,7 +96,7 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
unsigned long orig_dst;
sch_frag_prepare_frag(skb, xmit);
- dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
+ dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
DST_OBSOLETE_NONE, DST_NOCOUNT);
sch_frag_rt.dst.dev = skb->dev;
@@ -112,7 +112,7 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
sch_frag_prepare_frag(skb, xmit);
memset(&sch_frag_rt, 0, sizeof(sch_frag_rt));
- dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
+ dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
DST_OBSOLETE_NONE, DST_NOCOUNT);
sch_frag_rt.dst.dev = skb->dev;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5d7e23f4cc0e..4195a4bc26ca 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -694,9 +694,10 @@ struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
.owner = THIS_MODULE,
};
-static const u8 prio2band[TC_PRIO_MAX + 1] = {
- 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
+const u8 sch_default_prio2band[TC_PRIO_MAX + 1] = {
+ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
+EXPORT_SYMBOL(sch_default_prio2band);
/* 3-band FIFO queue: old style, but should be a bit faster than
generic prio+fifo combination.
@@ -721,7 +722,7 @@ static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
- int band = prio2band[skb->priority & TC_PRIO_MAX];
+ int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX];
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
struct skb_array *q = band2list(priv, band);
unsigned int pkt_len = qdisc_pkt_len(skb);
@@ -830,7 +831,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
- memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
+ memcpy(&opt.priomap, sch_default_prio2band, TC_PRIO_MAX + 1);
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
goto nla_put_failure;
return skb->len;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 43f2731bf590..24368f755ab1 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -128,7 +128,6 @@ static void sctp_v6_err_handle(struct sctp_transport *t, struct sk_buff *skb,
{
struct sctp_association *asoc = t->asoc;
struct sock *sk = asoc->base.sk;
- struct ipv6_pinfo *np;
int err = 0;
switch (type) {
@@ -149,9 +148,8 @@ static void sctp_v6_err_handle(struct sctp_transport *t, struct sk_buff *skb,
break;
}
- np = inet6_sk(sk);
icmpv6_err_convert(type, code, &err);
- if (!sock_owned_by_user(sk) && np->recverr) {
+ if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
sk->sk_err = err;
sk_error_report(sk);
} else {
@@ -249,7 +247,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
rcu_read_lock();
res = ip6_xmit(sk, skb, fl6, sk->sk_mark,
rcu_dereference(np->opt),
- tclass, sk->sk_priority);
+ tclass, READ_ONCE(sk->sk_priority));
rcu_read_unlock();
return res;
}
@@ -298,7 +296,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (t->flowlabel & SCTP_FLOWLABEL_SET_MASK)
fl6->flowlabel = htonl(t->flowlabel & SCTP_FLOWLABEL_VAL_MASK);
- if (np->sndflow && (fl6->flowlabel & IPV6_FLOWLABEL_MASK)) {
+ if (inet6_test_bit(SNDFLOW, sk) &&
+ (fl6->flowlabel & IPV6_FLOWLABEL_MASK)) {
struct ip6_flowlabel *flowlabel;
flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 2185f44198de..94c6dd53cd62 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -426,7 +426,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
struct dst_entry *dst = NULL;
union sctp_addr *daddr = &t->ipaddr;
union sctp_addr dst_saddr;
- __u8 tos = inet_sk(sk)->tos;
+ u8 tos = READ_ONCE(inet_sk(sk)->tos);
if (t->dscp & SCTP_DSCP_SET_MASK)
tos = t->dscp & SCTP_DSCP_VAL_MASK;
@@ -1057,7 +1057,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
struct flowi4 *fl4 = &t->fl.u.ip4;
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
- __u8 dscp = inet->tos;
+ __u8 dscp = READ_ONCE(inet->tos);
__be16 df = 0;
pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb,
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 08527d882e56..f80208edd6a5 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3303,7 +3303,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
/* Process the TLVs contained within the ASCONF chunk. */
sctp_walk_params(param, addip) {
- /* Skip preceeding address parameters. */
+ /* Skip preceding address parameters. */
if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
param.p->type == SCTP_PARAM_IPV6_ADDRESS)
continue;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index bacdd971615e..297681601414 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -493,7 +493,7 @@ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
nsk->sk_sndtimeo = osk->sk_sndtimeo;
nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
nsk->sk_mark = READ_ONCE(osk->sk_mark);
- nsk->sk_priority = osk->sk_priority;
+ nsk->sk_priority = READ_ONCE(osk->sk_priority);
nsk->sk_rcvlowat = osk->sk_rcvlowat;
nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
nsk->sk_err = osk->sk_err;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index e33b4f29f77c..d0143823658d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1446,7 +1446,7 @@ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
p = (struct tipc_gap_ack_blks *)msg_data(hdr);
sz = ntohs(p->len);
/* Sanity check */
- if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
+ if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
/* Good, check if the desired type exists */
if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
goto ok;
@@ -1533,7 +1533,7 @@ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
/* Total len */
- len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
+ len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
ga->len = htons(len);
return len;
}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d1fc295b83b5..270712b8d391 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1487,7 +1487,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
*/
aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
aead_size = ALIGN(aead_size, __alignof__(*dctx));
- mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
+ mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
sk->sk_allocation);
if (!mem) {
err = -ENOMEM;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 020cf17ab7e4..013b65241b65 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1921,6 +1921,9 @@ out_err:
err = total_written;
}
out:
+ if (sk->sk_type == SOCK_STREAM)
+ err = sk_stream_error(sk, msg->msg_flags, err);
+
release_sock(sk);
return err;
}
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index e95df847176b..09ba3128e759 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -63,6 +63,17 @@ struct virtio_vsock {
u32 guest_cid;
bool seqpacket_allow;
+
+	/* These fields are used only in the tx path, in
+	 * 'virtio_transport_send_pkt_work()'; keeping them here saves
+	 * stack space in that function. Each pointer in 'out_sgs'
+	 * points to the corresponding element of 'out_bufs' - this is
+	 * initialized in 'virtio_vsock_probe()'. Both fields are
+	 * protected by 'tx_lock'. The +1 is for the packet header.
+ */
+ struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
+ struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
};
static u32 virtio_transport_get_local_cid(void)
@@ -100,8 +111,8 @@ virtio_transport_send_pkt_work(struct work_struct *work)
vq = vsock->vqs[VSOCK_VQ_TX];
for (;;) {
- struct scatterlist hdr, buf, *sgs[2];
int ret, in_sg = 0, out_sg = 0;
+ struct scatterlist **sgs;
struct sk_buff *skb;
bool reply;
@@ -111,12 +122,43 @@ virtio_transport_send_pkt_work(struct work_struct *work)
virtio_transport_deliver_tap_pkt(skb);
reply = virtio_vsock_skb_reply(skb);
-
- sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
- sgs[out_sg++] = &hdr;
- if (skb->len > 0) {
- sg_init_one(&buf, skb->data, skb->len);
- sgs[out_sg++] = &buf;
+ sgs = vsock->out_sgs;
+ sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
+ sizeof(*virtio_vsock_hdr(skb)));
+ out_sg++;
+
+ if (!skb_is_nonlinear(skb)) {
+ if (skb->len > 0) {
+ sg_init_one(sgs[out_sg], skb->data, skb->len);
+ out_sg++;
+ }
+ } else {
+ struct skb_shared_info *si;
+ int i;
+
+			/* If the skb is nonlinear, its linear buffer must
+			 * contain only the header and nothing more. Data is
+			 * stored in the fragments.
+ */
+ WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
+
+ si = skb_shinfo(skb);
+
+ for (i = 0; i < si->nr_frags; i++) {
+ skb_frag_t *skb_frag = &si->frags[i];
+ void *va;
+
+ /* We will use 'page_to_virt()' for the userspace page
+ * here, because virtio or dma-mapping layers will call
+ * 'virt_to_phys()' later to fill the buffer descriptor.
+				 * We don't touch memory at the "virtual" address of this page.
+ */
+ va = page_to_virt(skb_frag->bv_page);
+ sg_init_one(sgs[out_sg],
+ va + skb_frag->bv_offset,
+ skb_frag->bv_len);
+ out_sg++;
+ }
}
ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
@@ -413,6 +455,37 @@ static void virtio_vsock_rx_done(struct virtqueue *vq)
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
+static bool virtio_transport_can_msgzerocopy(int bufs_num)
+{
+ struct virtio_vsock *vsock;
+ bool res = false;
+
+ rcu_read_lock();
+
+ vsock = rcu_dereference(the_virtio_vsock);
+ if (vsock) {
+ struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
+
+		/* Check that the tx queue is large enough to hold all the
+		 * data to send. This is needed because, when there is not
+		 * enough free space in the queue, the current skb is
+		 * reinserted at the head of the socket's tx list to retry
+		 * transmission later; if the skb is bigger than the whole
+		 * queue, it will be reinserted again and again, blocking
+		 * other skbs from being sent. Each page of the user-provided
+		 * buffer is added as a single buffer to the tx virtqueue,
+		 * so compare the number of pages against the maximum
+		 * capacity of the queue.
+ */
+ if (bufs_num <= vq->num_max)
+ res = true;
+ }
+
+ rcu_read_unlock();
+
+ return res;
+}
+
static bool virtio_transport_seqpacket_allow(u32 remote_cid);
static struct virtio_transport virtio_transport = {
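
The new can_msgzerocopy callback refuses MSG_ZEROCOPY whenever the resulting skb would need more descriptors than the tx virtqueue can ever hold, since such an skb would be requeued forever as the comment above explains. Each user page becomes one buffer, plus one for the header, so the test reduces to a page count against vq->num_max. A hedged sketch of that arithmetic (constants illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TOY_PAGE_SIZE	4096
#define TOY_MAX_FRAGS	17	/* stands in for MAX_SKB_FRAGS */

/* pages the payload spans, capped at the frag limit, +1 for the header */
static int toy_bufs_needed(size_t payload_len)
{
	size_t pages = (payload_len + TOY_PAGE_SIZE - 1) / TOY_PAGE_SIZE;

	if (pages > TOY_MAX_FRAGS)
		pages = TOY_MAX_FRAGS;
	return (int)pages + 1;
}

static bool toy_can_msgzerocopy(size_t payload_len, unsigned int vq_num_max)
{
	return toy_bufs_needed(payload_len) <= (int)vq_num_max;
}

int main(void)
{
	printf("%d\n", toy_can_msgzerocopy(64 * 1024, 256));	/* 1: fits */
	printf("%d\n", toy_can_msgzerocopy(64 * 1024, 8));	/* 0: would spin */
	return 0;
}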
@@ -462,6 +535,7 @@ static struct virtio_transport virtio_transport = {
},
.send_pkt = virtio_transport_send_pkt,
+ .can_msgzerocopy = virtio_transport_can_msgzerocopy,
};
static bool virtio_transport_seqpacket_allow(u32 remote_cid)
@@ -621,6 +695,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
{
struct virtio_vsock *vsock = NULL;
int ret;
+ int i;
ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
if (ret)
@@ -663,6 +738,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
if (ret < 0)
goto out;
+ for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
+ vsock->out_sgs[i] = &vsock->out_bufs[i];
+
rcu_assign_pointer(the_virtio_vsock, vsock);
mutex_unlock(&the_virtio_vsock_mutex);
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 352d042b130b..e22c81435ef7 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -37,27 +37,89 @@ virtio_transport_get_ops(struct vsock_sock *vsk)
return container_of(t, struct virtio_transport, transport);
}
-/* Returns a new packet on success, otherwise returns NULL.
- *
- * If NULL is returned, errp is set to a negative errno.
- */
-static struct sk_buff *
-virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
- size_t len,
- u32 src_cid,
- u32 src_port,
- u32 dst_cid,
- u32 dst_port)
-{
- const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
- struct virtio_vsock_hdr *hdr;
- struct sk_buff *skb;
- void *payload;
- int err;
+static bool virtio_transport_can_zcopy(const struct virtio_transport *t_ops,
+ struct virtio_vsock_pkt_info *info,
+ size_t pkt_len)
+{
+ struct iov_iter *iov_iter;
- skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
- if (!skb)
- return NULL;
+ if (!info->msg)
+ return false;
+
+ iov_iter = &info->msg->msg_iter;
+
+ if (iov_iter->iov_offset)
+ return false;
+
+ /* We can't send whole iov. */
+ if (iov_iter->count > pkt_len)
+ return false;
+
+ /* Check that transport can send data in zerocopy mode. */
+ t_ops = virtio_transport_get_ops(info->vsk);
+
+ if (t_ops->can_msgzerocopy) {
+ int pages_in_iov = iov_iter_npages(iov_iter, MAX_SKB_FRAGS);
+ int pages_to_send = min(pages_in_iov, MAX_SKB_FRAGS);
+
+ /* +1 is for packet header. */
+ return t_ops->can_msgzerocopy(pages_to_send + 1);
+ }
+
+ return true;
+}
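
What this gate ultimately enables is the standard MSG_ZEROCOPY flow on vsock sockets. A hedged userspace fragment showing the application-side contract (fallback defines included in case libc headers lack the constants; connection setup and error handling omitted):

#include <string.h>
#include <sys/socket.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60		/* from the uapi socket headers */
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

static int send_zc(int fd, const void *buf, size_t len)
{
	int one = 1;

	/* opt in once per socket, exactly as for TCP MSG_ZEROCOPY */
	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
		return -1;

	/* pages are pinned rather than copied; the completion for this
	 * send arrives later on the socket error queue (MSG_ERRQUEUE)
	 */
	return (int)send(fd, buf, len, MSG_ZEROCOPY);
}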
+
+static int virtio_transport_init_zcopy_skb(struct vsock_sock *vsk,
+ struct sk_buff *skb,
+ struct msghdr *msg,
+ bool zerocopy)
+{
+ struct ubuf_info *uarg;
+
+ if (msg->msg_ubuf) {
+ uarg = msg->msg_ubuf;
+ net_zcopy_get(uarg);
+ } else {
+ struct iov_iter *iter = &msg->msg_iter;
+ struct ubuf_info_msgzc *uarg_zc;
+
+ uarg = msg_zerocopy_realloc(sk_vsock(vsk),
+ iter->count,
+ NULL);
+ if (!uarg)
+ return -1;
+
+ uarg_zc = uarg_to_msgzc(uarg);
+ uarg_zc->zerocopy = zerocopy ? 1 : 0;
+ }
+
+ skb_zcopy_init(skb, uarg);
+
+ return 0;
+}
+
+static int virtio_transport_fill_skb(struct sk_buff *skb,
+ struct virtio_vsock_pkt_info *info,
+ size_t len,
+ bool zcopy)
+{
+ if (zcopy)
+ return __zerocopy_sg_from_iter(info->msg, NULL, skb,
+ &info->msg->msg_iter,
+ len);
+
+ return memcpy_from_msg(skb_put(skb, len), info->msg, len);
+}
+
+static void virtio_transport_init_hdr(struct sk_buff *skb,
+ struct virtio_vsock_pkt_info *info,
+ size_t payload_len,
+ u32 src_cid,
+ u32 src_port,
+ u32 dst_cid,
+ u32 dst_port)
+{
+ struct virtio_vsock_hdr *hdr;
hdr = virtio_vsock_hdr(skb);
hdr->type = cpu_to_le16(info->type);
@@ -67,43 +129,28 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
hdr->src_port = cpu_to_le32(src_port);
hdr->dst_port = cpu_to_le32(dst_port);
hdr->flags = cpu_to_le32(info->flags);
- hdr->len = cpu_to_le32(len);
-
- if (info->msg && len > 0) {
- payload = skb_put(skb, len);
- err = memcpy_from_msg(payload, info->msg, len);
- if (err)
- goto out;
-
- if (msg_data_left(info->msg) == 0 &&
- info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
- hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
-
- if (info->msg->msg_flags & MSG_EOR)
- hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
- }
- }
+ hdr->len = cpu_to_le32(payload_len);
+}
- if (info->reply)
- virtio_vsock_skb_set_reply(skb);
+static void virtio_transport_copy_nonlinear_skb(const struct sk_buff *skb,
+ void *dst,
+ size_t len)
+{
+ struct iov_iter iov_iter = { 0 };
+ struct kvec kvec;
+ size_t to_copy;
- trace_virtio_transport_alloc_pkt(src_cid, src_port,
- dst_cid, dst_port,
- len,
- info->type,
- info->op,
- info->flags);
+ kvec.iov_base = dst;
+ kvec.iov_len = len;
- if (info->vsk && !skb_set_owner_sk_safe(skb, sk_vsock(info->vsk))) {
- WARN_ONCE(1, "failed to allocate skb on vsock socket with sk_refcnt == 0\n");
- goto out;
- }
+ iov_iter.iter_type = ITER_KVEC;
+ iov_iter.kvec = &kvec;
+ iov_iter.nr_segs = 1;
- return skb;
+ to_copy = min_t(size_t, len, skb->len);
-out:
- kfree_skb(skb);
- return NULL;
+ skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
+ &iov_iter, to_copy);
}
/* Packet capture */
@@ -114,7 +161,6 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
struct af_vsockmon_hdr *hdr;
struct sk_buff *skb;
size_t payload_len;
- void *payload_buf;
/* A packet could be split to fit the RX buffer, so we can retrieve
* the payload length from the header and the buffer pointer taking
@@ -122,7 +168,6 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
*/
pkt_hdr = virtio_vsock_hdr(pkt);
payload_len = pkt->len;
- payload_buf = pkt->data;
skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
GFP_ATOMIC);
@@ -165,7 +210,13 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));
if (payload_len) {
- skb_put_data(skb, payload_buf, payload_len);
+ if (skb_is_nonlinear(pkt)) {
+ void *data = skb_put(skb, payload_len);
+
+ virtio_transport_copy_nonlinear_skb(pkt, data, payload_len);
+ } else {
+ skb_put_data(skb, pkt->data, payload_len);
+ }
}
return skb;
@@ -189,6 +240,82 @@ static u16 virtio_transport_get_type(struct sock *sk)
return VIRTIO_VSOCK_TYPE_SEQPACKET;
}
+/* Returns a new sk_buff on success, otherwise returns NULL. */
+static struct sk_buff *virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
+ size_t payload_len,
+ bool zcopy,
+ u32 src_cid,
+ u32 src_port,
+ u32 dst_cid,
+ u32 dst_port)
+{
+ struct vsock_sock *vsk;
+ struct sk_buff *skb;
+ size_t skb_len;
+
+ skb_len = VIRTIO_VSOCK_SKB_HEADROOM;
+
+ if (!zcopy)
+ skb_len += payload_len;
+
+ skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ virtio_transport_init_hdr(skb, info, payload_len, src_cid, src_port,
+ dst_cid, dst_port);
+
+ vsk = info->vsk;
+
+ /* If 'vsk' != NULL, a payload is always present, so we
+ * will never call '__zerocopy_sg_from_iter()' below without
+ * having set the skb owner via 'skb_set_owner_w()'. The only
+ * case where 'vsk' == NULL is a VIRTIO_VSOCK_OP_RST control
+ * message without a payload.
+ */
+ WARN_ON_ONCE(!(vsk && (info->msg && payload_len)) && zcopy);
+
+ /* Set the owner here, because '__zerocopy_sg_from_iter()' uses
+ * the skb's owner without a check when updating 'sk_wmem_alloc'.
+ */
+ if (vsk)
+ skb_set_owner_w(skb, sk_vsock(vsk));
+
+ if (info->msg && payload_len > 0) {
+ int err;
+
+ err = virtio_transport_fill_skb(skb, info, payload_len, zcopy);
+ if (err)
+ goto out;
+
+ if (msg_data_left(info->msg) == 0 &&
+ info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+
+ hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+
+ if (info->msg->msg_flags & MSG_EOR)
+ hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ }
+ }
+
+ if (info->reply)
+ virtio_vsock_skb_set_reply(skb);
+
+ trace_virtio_transport_alloc_pkt(src_cid, src_port,
+ dst_cid, dst_port,
+ payload_len,
+ info->type,
+ info->op,
+ info->flags,
+ zcopy);
+
+ return skb;
+out:
+ kfree_skb(skb);
+ return NULL;
+}
+
/* This function can only be used on connecting/connected sockets,
* since a socket assigned to a transport is required.
*
@@ -197,10 +324,12 @@ static u16 virtio_transport_get_type(struct sock *sk)
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
struct virtio_vsock_pkt_info *info)
{
+ u32 max_skb_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
u32 src_cid, src_port, dst_cid, dst_port;
const struct virtio_transport *t_ops;
struct virtio_vsock_sock *vvs;
u32 pkt_len = info->pkt_len;
+ bool can_zcopy = false;
u32 rest_len;
int ret;
@@ -229,15 +358,30 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
return pkt_len;
+ if (info->msg) {
+ /* If zerocopy was not enabled via 'setsockopt()', we behave as
+ * if the MSG_ZEROCOPY flag were not set.
+ */
+ if (!sock_flag(sk_vsock(vsk), SOCK_ZEROCOPY))
+ info->msg->msg_flags &= ~MSG_ZEROCOPY;
+
+ if (info->msg->msg_flags & MSG_ZEROCOPY)
+ can_zcopy = virtio_transport_can_zcopy(t_ops, info, pkt_len);
+
+ if (can_zcopy)
+ max_skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE,
+ (MAX_SKB_FRAGS * PAGE_SIZE));
+ }
+
rest_len = pkt_len;
do {
struct sk_buff *skb;
size_t skb_len;
- skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE, rest_len);
+ skb_len = min(max_skb_len, rest_len);
- skb = virtio_transport_alloc_skb(info, skb_len,
+ skb = virtio_transport_alloc_skb(info, skb_len, can_zcopy,
src_cid, src_port,
dst_cid, dst_port);
if (!skb) {
@@ -245,6 +389,21 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
break;
}
+ /* We process the buffer part by part, allocating an skb on
+ * each iteration. If this is the last skb for this buffer
+ * and MSG_ZEROCOPY mode is in use, we must allocate a
+ * completion for the current syscall.
+ */
+ if (info->msg && info->msg->msg_flags & MSG_ZEROCOPY &&
+ skb_len == rest_len && info->op == VIRTIO_VSOCK_OP_RW) {
+ if (virtio_transport_init_zcopy_skb(vsk, skb,
+ info->msg,
+ can_zcopy)) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
virtio_transport_inc_tx_pkt(vvs, skb);
ret = t_ops->send_pkt(skb);
@@ -364,9 +523,10 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
spin_unlock_bh(&vvs->rx_lock);
/* sk_lock is held by caller so no one else can dequeue.
- * Unlock rx_lock since memcpy_to_msg() may sleep.
+ * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
*/
- err = memcpy_to_msg(msg, skb->data, bytes);
+ err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
+ &msg->msg_iter, bytes);
if (err)
goto out;
@@ -410,25 +570,27 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
skb = skb_peek(&vvs->rx_queue);
- bytes = len - total;
- if (bytes > skb->len)
- bytes = skb->len;
+ bytes = min_t(size_t, len - total,
+ skb->len - VIRTIO_VSOCK_SKB_CB(skb)->offset);
/* sk_lock is held by caller so no one else can dequeue.
- * Unlock rx_lock since memcpy_to_msg() may sleep.
+ * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, skb->data, bytes);
+ err = skb_copy_datagram_iter(skb,
+ VIRTIO_VSOCK_SKB_CB(skb)->offset,
+ &msg->msg_iter, bytes);
if (err)
goto out;
spin_lock_bh(&vvs->rx_lock);
total += bytes;
- skb_pull(skb, bytes);
- if (skb->len == 0) {
+ VIRTIO_VSOCK_SKB_CB(skb)->offset += bytes;
+
+ if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->offset) {
u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
virtio_transport_dec_rx_pkt(vvs, pkt_len);
@@ -492,9 +654,10 @@ virtio_transport_seqpacket_do_peek(struct vsock_sock *vsk,
spin_unlock_bh(&vvs->rx_lock);
/* sk_lock is held by caller so no one else can dequeue.
- * Unlock rx_lock since memcpy_to_msg() may sleep.
+ * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
*/
- err = memcpy_to_msg(msg, skb->data, bytes);
+ err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
+ &msg->msg_iter, bytes);
if (err)
return err;
@@ -553,11 +716,13 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
int err;
/* sk_lock is held by caller so no one else can dequeue.
- * Unlock rx_lock since memcpy_to_msg() may sleep.
+ * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
+ err = skb_copy_datagram_iter(skb, 0,
+ &msg->msg_iter,
+ bytes_to_copy);
if (err) {
/* Copy of message failed. Rest of
* fragments will be freed without copy.
@@ -954,7 +1119,7 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
if (!t)
return -ENOTCONN;
- reply = virtio_transport_alloc_skb(&info, 0,
+ reply = virtio_transport_alloc_skb(&info, 0, false,
le64_to_cpu(hdr->dst_cid),
le32_to_cpu(hdr->dst_port),
le64_to_cpu(hdr->src_cid),
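For reference, the MSG_ZEROCOPY path above only engages once SO_ZEROCOPY has been enabled on the socket; otherwise the flag is cleared and the copying path is taken. A minimal userspace sketch of the intended flow — the helper name is illustrative and not part of this patch, and error handling plus full completion parsing are omitted:

    #include <errno.h>
    #include <sys/socket.h>
    #include <linux/errqueue.h>

    /* Sketch only: send one buffer with MSG_ZEROCOPY on an
     * already-connected vsock stream socket 'fd'.
     */
    static int vsock_send_zerocopy(int fd, const void *buf, size_t len)
    {
        int one = 1;
        struct msghdr msg = { 0 };
        char control[128];

        /* Without SO_ZEROCOPY, MSG_ZEROCOPY is silently ignored. */
        if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
            return -errno;

        if (send(fd, buf, len, MSG_ZEROCOPY) < 0)
            return -errno;

        /* The completion arrives on the socket's error queue; 'buf'
         * must stay untouched until it has been read (cmsg parsing
         * of SO_EE_ORIGIN_ZEROCOPY is omitted here).
         */
        msg.msg_control = control;
        msg.msg_controllen = sizeof(control);
        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
            return -errno;

        return 0;
    }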
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 0fb5143bec7a..aad8ffeaee04 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -598,7 +598,7 @@ static struct sock *x25_make_new(struct sock *osk)
x25 = x25_sk(sk);
sk->sk_type = osk->sk_type;
- sk->sk_priority = osk->sk_priority;
+ sk->sk_priority = READ_ONCE(osk->sk_priority);
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 55f8b9b0e06d..f5e96e0d6e01 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -684,7 +684,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
}
skb->dev = dev;
- skb->priority = xs->sk.sk_priority;
+ skb->priority = READ_ONCE(xs->sk.sk_priority);
skb->mark = READ_ONCE(xs->sk.sk_mark);
skb->destructor = xsk_destruct_skb;
xsk_set_destructor_arg(skb);
@@ -1228,7 +1228,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
xs->dev = dev;
xs->zc = xs->umem->zc;
- xs->sg = !!(flags & XDP_USE_SG);
+ xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
xs->queue_id = qid;
xp_add_xsk(xs->pool, xs);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index b3f7b310811e..49cb9f9a09be 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -170,6 +170,9 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
if (err)
return err;
+ if (flags & XDP_USE_SG)
+ pool->umem->flags |= XDP_UMEM_SG_FLAG;
+
if (flags & XDP_USE_NEED_WAKEUP)
pool->uses_need_wakeup = true;
/* Tx needs to be explicitly woken up the first time. Also
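Recording the flag on the umem means every socket bound to a shared umem inherits multi-buffer (SG) support, not only the socket that passed XDP_USE_SG at bind time. A hedged sketch of such a bind call, assuming 'xsk_fd', 'ifindex' and 'queue_id' are already set up:

    #include <sys/socket.h>
    #include <linux/if_xdp.h>

    /* Sketch only: bind an AF_XDP socket with multi-buffer support.
     * With the hunk above, the umem registered on this socket will
     * carry XDP_UMEM_SG_FLAG afterwards.
     */
    struct sockaddr_xdp sxdp = {
        .sxdp_family = AF_XDP,
        .sxdp_ifindex = ifindex,
        .sxdp_queue_id = queue_id,
        .sxdp_flags = XDP_USE_SG,
    };

    if (bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
        /* handle error */;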
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d6b405782b63..c4c4fc29ccf5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2561,7 +2561,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
default:
BUG();
}
- xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
+ xdst = dst_alloc(dst_ops, NULL, DST_OBSOLETE_NONE, 0);
if (likely(xdst)) {
memset_after(xdst, 0, u.dst);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4ccf4236031c..6c707ebcebb9 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -175,6 +175,7 @@ TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
TPROGS_CFLAGS += -I$(LIBBPF_INCLUDE)
TPROGS_CFLAGS += -I$(srctree)/tools/include
TPROGS_CFLAGS += -I$(srctree)/tools/perf
+TPROGS_CFLAGS += -I$(srctree)/tools/lib
TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0
ifdef SYSROOT
@@ -314,6 +315,9 @@ XDP_SAMPLE_CFLAGS += -Wall -O2 \
$(obj)/$(XDP_SAMPLE): TPROGS_CFLAGS = $(XDP_SAMPLE_CFLAGS)
$(obj)/$(XDP_SAMPLE): $(src)/xdp_sample_user.h $(src)/xdp_sample_shared.h
+# Override includes for trace_helpers.o because __must_check won't be defined
+# in our include path.
+$(obj)/$(TRACE_HELPERS): TPROGS_CFLAGS := $(TPROGS_CFLAGS) -D__must_check=
-include $(BPF_SAMPLES_PATH)/Makefile.target
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 2883660d6b67..04c47745b3ea 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -1209,7 +1209,7 @@ static int do_skeleton(int argc, char **argv)
codegen("\
\n\
\n\
- s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
+ s->data = %2$s__elf_bytes(&s->data_sz); \n\
\n\
obj->skeleton = s; \n\
return 0; \n\
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 0448700890f7..5f13db15a3c7 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -932,7 +932,14 @@ enum bpf_map_type {
*/
BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
- BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
+ /* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
+ * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
+ * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
+ * functionality and more, so mark BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
+ * as deprecated.
+ */
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index c1634b95c223..2943a151d4f1 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -38,11 +38,27 @@ enum netdev_xdp_act {
NETDEV_XDP_ACT_MASK = 127,
};
+/**
+ * enum netdev_xdp_rx_metadata
+ * @NETDEV_XDP_RX_METADATA_TIMESTAMP: Device is capable of exposing receive HW
+ * timestamp via bpf_xdp_metadata_rx_timestamp().
+ * @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet
+ * hash via bpf_xdp_metadata_rx_hash().
+ */
+enum netdev_xdp_rx_metadata {
+ NETDEV_XDP_RX_METADATA_TIMESTAMP = 1,
+ NETDEV_XDP_RX_METADATA_HASH = 2,
+
+ /* private: */
+ NETDEV_XDP_RX_METADATA_MASK = 3,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
NETDEV_A_DEV_XDP_FEATURES,
NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
+ NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
__NETDEV_A_DEV_MAX,
NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
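Since each enumerator is a single bit, the new xdp-rx-metadata-features attribute (a u64 bitmask reported per device) can be decoded by testing the flags directly; a minimal sketch, with the 'features' value assumed to come from the netlink response:

    #include <stdio.h>
    #include <linux/netdev.h>

    /* Sketch only: print the RX metadata kfuncs a device supports. */
    static void print_rx_metadata(unsigned long long features)
    {
        if (features & NETDEV_XDP_RX_METADATA_TIMESTAMP)
            printf(" timestamp");
        if (features & NETDEV_XDP_RX_METADATA_HASH)
            printf(" hash");
        printf("\n");
    }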
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index bbab9ad9dc5a..77ceea575dc7 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -181,6 +181,7 @@ enum libbpf_tristate {
#define __ksym __attribute__((section(".ksyms")))
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
#define __kptr __attribute__((btf_type_tag("kptr")))
+#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))
#define bpf_ksym_exists(sym) ({ \
_Static_assert(!__builtin_constant_p(!!sym), #sym " should be marked as __weak"); \
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 8484b563b53d..ee95fd379d4d 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -448,6 +448,165 @@ static int btf_parse_type_sec(struct btf *btf)
return 0;
}
+static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
+{
+ const char *s;
+
+ s = btf__str_by_offset(btf, str_off);
+ if (!s) {
+ pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
+{
+ const struct btf_type *t;
+
+ t = btf__type_by_id(btf, id);
+ if (!t) {
+ pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
+{
+ __u32 kind = btf_kind(t);
+ int err, i, n;
+
+ err = btf_validate_str(btf, t->name_off, "type name", id);
+ if (err)
+ return err;
+
+ switch (kind) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_INT:
+ case BTF_KIND_FWD:
+ case BTF_KIND_FLOAT:
+ break;
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_VAR:
+ case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
+ err = btf_validate_id(btf, t->type, id);
+ if (err)
+ return err;
+ break;
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *a = btf_array(t);
+
+ err = btf_validate_id(btf, a->type, id);
+ err = err ?: btf_validate_id(btf, a->index_type, id);
+ if (err)
+ return err;
+ break;
+ }
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m = btf_members(t);
+
+ n = btf_vlen(t);
+ for (i = 0; i < n; i++, m++) {
+ err = btf_validate_str(btf, m->name_off, "field name", id);
+ err = err ?: btf_validate_id(btf, m->type, id);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_ENUM: {
+ const struct btf_enum *m = btf_enum(t);
+
+ n = btf_vlen(t);
+ for (i = 0; i < n; i++, m++) {
+ err = btf_validate_str(btf, m->name_off, "enum name", id);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_ENUM64: {
+ const struct btf_enum64 *m = btf_enum64(t);
+
+ n = btf_vlen(t);
+ for (i = 0; i < n; i++, m++) {
+ err = btf_validate_str(btf, m->name_off, "enum name", id);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_FUNC: {
+ const struct btf_type *ft;
+
+ err = btf_validate_id(btf, t->type, id);
+ if (err)
+ return err;
+ ft = btf__type_by_id(btf, t->type);
+ if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
+ pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
+ return -EINVAL;
+ }
+ break;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *m = btf_params(t);
+
+ n = btf_vlen(t);
+ for (i = 0; i < n; i++, m++) {
+ err = btf_validate_str(btf, m->name_off, "param name", id);
+ err = err ?: btf_validate_id(btf, m->type, id);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_DATASEC: {
+ const struct btf_var_secinfo *m = btf_var_secinfos(t);
+
+ n = btf_vlen(t);
+ for (i = 0; i < n; i++, m++) {
+ err = btf_validate_id(btf, m->type, id);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ default:
+ pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Validate basic sanity of BTF. It's intentionally less thorough than
+ * the kernel's validation and checks only the properties of BTF that libbpf
+ * relies on to be correct (e.g., valid type IDs, valid string offsets, etc.).
+ */
+static int btf_sanity_check(const struct btf *btf)
+{
+ const struct btf_type *t;
+ __u32 i, n = btf__type_cnt(btf);
+ int err;
+
+ for (i = 1; i < n; i++) {
+ t = btf_type_by_id(btf, i);
+ err = btf_validate_type(btf, t, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
__u32 btf__type_cnt(const struct btf *btf)
{
return btf->start_id + btf->nr_types;
@@ -902,6 +1061,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
err = btf_parse_str_sec(btf);
err = err ?: btf_parse_type_sec(btf);
+ err = err ?: btf_sanity_check(btf);
if (err)
goto done;
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 96ff1aa4bf6a..3a6108e3238b 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -436,9 +436,11 @@ struct bpf_program {
int fd;
bool autoload;
bool autoattach;
+ bool sym_global;
bool mark_btf_static;
enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
+ int exception_cb_idx;
int prog_ifindex;
__u32 attach_btf_obj_fd;
@@ -765,6 +767,7 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
prog->type = BPF_PROG_TYPE_UNSPEC;
prog->fd = -1;
+ prog->exception_cb_idx = -1;
/* libbpf's convention for SEC("?abc...") is that it's just like
* SEC("abc...") but the corresponding bpf_program starts out with
@@ -871,14 +874,16 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
if (err)
return err;
+ if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
+ prog->sym_global = true;
+
/* if function is a global/weak symbol, but has restricted
* (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
* as static to enable more permissive BPF verification mode
* with more outside context available to BPF verifier
*/
- if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
- && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
- || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
+ if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
+ || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
prog->mark_btf_static = true;
nr_progs++;
@@ -3142,6 +3147,86 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
}
}
+ if (!kernel_supports(obj, FEAT_BTF_DECL_TAG))
+ goto skip_exception_cb;
+ for (i = 0; i < obj->nr_programs; i++) {
+ struct bpf_program *prog = &obj->programs[i];
+ int j, k, n;
+
+ if (prog_is_subprog(obj, prog))
+ continue;
+ n = btf__type_cnt(obj->btf);
+ for (j = 1; j < n; j++) {
+ const char *str = "exception_callback:", *name;
+ size_t len = strlen(str);
+ struct btf_type *t;
+
+ t = btf_type_by_id(obj->btf, j);
+ if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
+ continue;
+
+ name = btf__str_by_offset(obj->btf, t->name_off);
+ if (strncmp(name, str, len))
+ continue;
+
+ t = btf_type_by_id(obj->btf, t->type);
+ if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
+ pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
+ prog->name);
+ return -EINVAL;
+ }
+ if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)))
+ continue;
+ /* Multiple callbacks are specified for the same prog;
+ * the verifier will eventually return an error for this
+ * case, hence simply skip appending a subprog.
+ */
+ if (prog->exception_cb_idx >= 0) {
+ prog->exception_cb_idx = -1;
+ break;
+ }
+
+ name += len;
+ if (str_is_empty(name)) {
+ pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
+ prog->name);
+ return -EINVAL;
+ }
+
+ for (k = 0; k < obj->nr_programs; k++) {
+ struct bpf_program *subprog = &obj->programs[k];
+
+ if (!prog_is_subprog(obj, subprog))
+ continue;
+ if (strcmp(name, subprog->name))
+ continue;
+ /* Enforce non-hidden, as from the verifier's point of
+ * view it expects global functions, whereas
+ * mark_btf_static fixes up the linkage as static.
+ */
+ if (!subprog->sym_global || subprog->mark_btf_static) {
+ pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
+ prog->name, subprog->name);
+ return -EINVAL;
+ }
+ /* Let's see if we already saw a static exception callback with the same name */
+ if (prog->exception_cb_idx >= 0) {
+ pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
+ prog->name, subprog->name);
+ return -EINVAL;
+ }
+ prog->exception_cb_idx = k;
+ break;
+ }
+
+ if (prog->exception_cb_idx >= 0)
+ continue;
+ pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
+ return -ENOENT;
+ }
+ }
+skip_exception_cb:
+
sanitize = btf_needs_sanitization(obj);
if (sanitize) {
const void *raw_data;
@@ -6235,13 +6320,45 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra
}
static int
+bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
+ struct bpf_program *subprog)
+{
+ struct bpf_insn *insns;
+ size_t new_cnt;
+ int err;
+
+ subprog->sub_insn_off = main_prog->insns_cnt;
+
+ new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
+ insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
+ if (!insns) {
+ pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
+ return -ENOMEM;
+ }
+ main_prog->insns = insns;
+ main_prog->insns_cnt = new_cnt;
+
+ memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
+ subprog->insns_cnt * sizeof(*insns));
+
+ pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
+ main_prog->name, subprog->insns_cnt, subprog->name);
+
+ /* The subprog insns are now appended. Append its relos too. */
+ err = append_subprog_relos(main_prog, subprog);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int
bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
struct bpf_program *prog)
{
- size_t sub_insn_idx, insn_idx, new_cnt;
+ size_t sub_insn_idx, insn_idx;
struct bpf_program *subprog;
- struct bpf_insn *insns, *insn;
struct reloc_desc *relo;
+ struct bpf_insn *insn;
int err;
err = reloc_prog_func_and_line_info(obj, main_prog, prog);
@@ -6316,25 +6433,7 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
* and relocate.
*/
if (subprog->sub_insn_off == 0) {
- subprog->sub_insn_off = main_prog->insns_cnt;
-
- new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
- insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
- if (!insns) {
- pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
- return -ENOMEM;
- }
- main_prog->insns = insns;
- main_prog->insns_cnt = new_cnt;
-
- memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
- subprog->insns_cnt * sizeof(*insns));
-
- pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
- main_prog->name, subprog->insns_cnt, subprog->name);
-
- /* The subprog insns are now appended. Append its relos too. */
- err = append_subprog_relos(main_prog, subprog);
+ err = bpf_object__append_subprog_code(obj, main_prog, subprog);
if (err)
return err;
err = bpf_object__reloc_code(obj, main_prog, subprog);
@@ -6568,6 +6667,25 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
prog->name, err);
return err;
}
+
+ /* Now, also append exception callback if it has not been done already. */
+ if (prog->exception_cb_idx >= 0) {
+ struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
+
+ /* Calling the exception callback directly is disallowed; the
+ * verifier will reject it later. If it was already processed,
+ * we can skip this step; otherwise, for all other valid cases,
+ * we have to append the exception callback now.
+ */
+ if (subprog->sub_insn_off == 0) {
+ err = bpf_object__append_subprog_code(obj, prog, subprog);
+ if (err)
+ return err;
+ err = bpf_object__reloc_code(obj, prog, subprog);
+ if (err)
+ return err;
+ }
+ }
}
/* Process data relos for main programs */
for (i = 0; i < obj->nr_programs; i++) {
diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile
index 8156f03e23ac..d664b36deb5b 100644
--- a/tools/net/ynl/Makefile
+++ b/tools/net/ynl/Makefile
@@ -3,7 +3,6 @@
SUBDIRS = lib generated samples
all: $(SUBDIRS)
- ./ynl-regen.sh -f -p $(PWD)/../../../
$(SUBDIRS):
@if [ -f "$@/Makefile" ] ; then \
diff --git a/tools/net/ynl/generated/Makefile b/tools/net/ynl/generated/Makefile
index f8817d2e56e4..0f359ee3c46a 100644
--- a/tools/net/ynl/generated/Makefile
+++ b/tools/net/ynl/generated/Makefile
@@ -19,7 +19,7 @@ SRCS=$(patsubst %,%-user.c,${GENS})
HDRS=$(patsubst %,%-user.h,${GENS})
OBJS=$(patsubst %,%-user.o,${GENS})
-all: protos.a $(HDRS) $(SRCS) $(KHDRS) $(KSRCS) $(UAPI) regen
+all: protos.a $(HDRS) $(SRCS) $(KHDRS) $(KSRCS) $(UAPI)
protos.a: $(OBJS)
@echo -e "\tAR $@"
diff --git a/tools/net/ynl/generated/handshake-user.h b/tools/net/ynl/generated/handshake-user.h
index 47646bb91cea..2b34acc608de 100644
--- a/tools/net/ynl/generated/handshake-user.h
+++ b/tools/net/ynl/generated/handshake-user.h
@@ -28,8 +28,8 @@ struct handshake_x509 {
__u32 privkey:1;
} _present;
- __u32 cert;
- __u32 privkey;
+ __s32 cert;
+ __s32 privkey;
};
/* ============== HANDSHAKE_CMD_ACCEPT ============== */
@@ -65,7 +65,7 @@ struct handshake_accept_rsp {
__u32 peername_len;
} _present;
- __u32 sockfd;
+ __s32 sockfd;
enum handshake_msg_type message_type;
__u32 timeout;
enum handshake_auth auth_mode;
@@ -104,7 +104,7 @@ struct handshake_done_req {
} _present;
__u32 status;
- __u32 sockfd;
+ __s32 sockfd;
unsigned int n_remote_auth;
__u32 *remote_auth;
};
@@ -122,7 +122,7 @@ handshake_done_req_set_status(struct handshake_done_req *req, __u32 status)
req->status = status;
}
static inline void
-handshake_done_req_set_sockfd(struct handshake_done_req *req, __u32 sockfd)
+handshake_done_req_set_sockfd(struct handshake_done_req *req, __s32 sockfd)
{
req->_present.sockfd = 1;
req->sockfd = sockfd;
diff --git a/tools/net/ynl/generated/netdev-user.c b/tools/net/ynl/generated/netdev-user.c
index 68b408ca0f7f..b5ffe8cd1144 100644
--- a/tools/net/ynl/generated/netdev-user.c
+++ b/tools/net/ynl/generated/netdev-user.c
@@ -45,12 +45,26 @@ const char *netdev_xdp_act_str(enum netdev_xdp_act value)
return netdev_xdp_act_strmap[value];
}
+static const char * const netdev_xdp_rx_metadata_strmap[] = {
+ [0] = "timestamp",
+ [1] = "hash",
+};
+
+const char *netdev_xdp_rx_metadata_str(enum netdev_xdp_rx_metadata value)
+{
+ value = ffs(value) - 1;
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(netdev_xdp_rx_metadata_strmap))
+ return NULL;
+ return netdev_xdp_rx_metadata_strmap[value];
+}
+
/* Policies */
struct ynl_policy_attr netdev_dev_policy[NETDEV_A_DEV_MAX + 1] = {
[NETDEV_A_DEV_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
[NETDEV_A_DEV_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
[NETDEV_A_DEV_XDP_FEATURES] = { .name = "xdp-features", .type = YNL_PT_U64, },
[NETDEV_A_DEV_XDP_ZC_MAX_SEGS] = { .name = "xdp-zc-max-segs", .type = YNL_PT_U32, },
+ [NETDEV_A_DEV_XDP_RX_METADATA_FEATURES] = { .name = "xdp-rx-metadata-features", .type = YNL_PT_U64, },
};
struct ynl_policy_nest netdev_dev_nest = {
@@ -97,6 +111,11 @@ int netdev_dev_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
return MNL_CB_ERROR;
dst->_present.xdp_zc_max_segs = 1;
dst->xdp_zc_max_segs = mnl_attr_get_u32(attr);
+ } else if (type == NETDEV_A_DEV_XDP_RX_METADATA_FEATURES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.xdp_rx_metadata_features = 1;
+ dst->xdp_rx_metadata_features = mnl_attr_get_u64(attr);
}
}
diff --git a/tools/net/ynl/generated/netdev-user.h b/tools/net/ynl/generated/netdev-user.h
index 0952d3261f4d..b4351ff34595 100644
--- a/tools/net/ynl/generated/netdev-user.h
+++ b/tools/net/ynl/generated/netdev-user.h
@@ -18,6 +18,7 @@ extern const struct ynl_family ynl_netdev_family;
/* Enums */
const char *netdev_op_str(int op);
const char *netdev_xdp_act_str(enum netdev_xdp_act value);
+const char *netdev_xdp_rx_metadata_str(enum netdev_xdp_rx_metadata value);
/* Common nested types */
/* ============== NETDEV_CMD_DEV_GET ============== */
@@ -48,11 +49,13 @@ struct netdev_dev_get_rsp {
__u32 ifindex:1;
__u32 xdp_features:1;
__u32 xdp_zc_max_segs:1;
+ __u32 xdp_rx_metadata_features:1;
} _present;
__u32 ifindex;
__u64 xdp_features;
__u32 xdp_zc_max_segs;
+ __u64 xdp_rx_metadata_features;
};
void netdev_dev_get_rsp_free(struct netdev_dev_get_rsp *rsp);
diff --git a/tools/net/ynl/samples/Makefile b/tools/net/ynl/samples/Makefile
index f2db8bb78309..3dbb106e87d9 100644
--- a/tools/net/ynl/samples/Makefile
+++ b/tools/net/ynl/samples/Makefile
@@ -19,6 +19,9 @@ include $(wildcard *.d)
all: $(BINS)
$(BINS): ../lib/ynl.a ../generated/protos.a
+ @echo -e '\tCC sample $@'
+ @$(COMPILE.c) $(CFLAGS_$@) $@.c -o $@.o
+ @$(LINK.c) $@.o -o $@ $(LDLIBS)
clean:
rm -f *.o *.d *~
diff --git a/tools/net/ynl/samples/netdev.c b/tools/net/ynl/samples/netdev.c
index 06433400dddd..b828225daad0 100644
--- a/tools/net/ynl/samples/netdev.c
+++ b/tools/net/ynl/samples/netdev.c
@@ -32,12 +32,18 @@ static void netdev_print_device(struct netdev_dev_get_rsp *d, unsigned int op)
if (!d->_present.xdp_features)
return;
- printf("%llx:", d->xdp_features);
+ printf("xdp-features (%llx):", d->xdp_features);
for (int i = 0; d->xdp_features > 1U << i; i++) {
if (d->xdp_features & (1U << i))
printf(" %s", netdev_xdp_act_str(1 << i));
}
+ printf(" xdp-rx-metadata-features (%llx):", d->xdp_rx_metadata_features);
+ for (int i = 0; d->xdp_rx_metadata_features > 1U << i; i++) {
+ if (d->xdp_rx_metadata_features & (1U << i))
+ printf(" %s", netdev_xdp_rx_metadata_str(1 << i));
+ }
+
printf(" xdp-zc-max-segs=%u", d->xdp_zc_max_segs);
name = netdev_op_str(op);
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 3babaf3eee5c..1e827c24761c 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -1,5 +1,6 @@
bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+exceptions # JIT does not support calling kfunc bpf_throw: -524
fexit_sleep # The test never returns. The remaining tests cannot start.
kprobe_multi_bench_attach # needs CONFIG_FPROBE
kprobe_multi_test # needs CONFIG_FPROBE
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index 5061d9e24c16..ce6f291665cf 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -6,6 +6,7 @@ bpf_loop # attaches to __x64_sys_nanosleep
cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
dynptr/test_dynptr_skb_data
dynptr/test_skb_readonly
+exceptions # JIT does not support calling kfunc bpf_throw (exceptions)
fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
iters/testmod_seq* # s390x doesn't support kfuncs in modules yet
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 209811b1993a..9aa29564bd74 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -131,4 +131,323 @@ extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *nod
*/
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
+/* Description
+ * Allocates a percpu object of the type represented by 'local_type_id' in
+ * program BTF. User may use the bpf_core_type_id_local macro to pass the
+ * type ID of a struct in program BTF.
+ *
+ * The 'local_type_id' parameter must be a known constant.
+ * The 'meta' parameter is rewritten by the verifier, no need for BPF
+ * program to set it.
+ * Returns
+ * A pointer to a percpu object of the type corresponding to the passed in
+ * 'local_type_id', or NULL on failure.
+ */
+extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
+#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))
+
+/* Description
+ * Free an allocated percpu object. All fields of the object that require
+ * destruction will be destructed before the storage is freed.
+ *
+ * The 'meta' parameter is rewritten by the verifier, no need for BPF
+ * program to set it.
+ * Returns
+ * Void.
+ */
+extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_percpu_obj_drop_impl */
+#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)
+
+/* Description
+ * Throw a BPF exception from the program, immediately terminating its
+ * execution and unwinding the stack. The supplied 'cookie' parameter
+ * will be the return value of the program when an exception is thrown,
+ * and the default exception callback is used. Otherwise, if an exception
+ * callback is set using the '__exception_cb(callback)' declaration tag
+ * on the main program, the 'cookie' parameter will be the callback's only
+ * input argument.
+ *
+ * Thus, in case of default exception callback, 'cookie' is subjected to
+ * constraints on the program's return value (as with R0 on exit).
+ * Otherwise, the return value of the marked exception callback will be
+ * subjected to the same checks.
+ *
+ * Note that throwing an exception with lingering resources (locks,
+ * references, etc.) will lead to a verification error.
+ *
+ * Note that callbacks *cannot* call this helper.
+ * Returns
+ * Never.
+ * Throws
+ * An exception with the specified 'cookie' value.
+ */
+extern void bpf_throw(u64 cookie) __ksym;
+
+/* This macro must be used to mark the exception callback corresponding to the
+ * main program. For example:
+ *
+ * int exception_cb(u64 cookie) {
+ * return cookie;
+ * }
+ *
+ * SEC("tc")
+ * __exception_cb(exception_cb)
+ * int main_prog(struct __sk_buff *ctx) {
+ * ...
+ * return TC_ACT_OK;
+ * }
+ *
+ * Here, the exception callback for the main program will be 'exception_cb'.
+ * Note that this attribute can only be used once; specifying multiple
+ * exception callbacks for the main program will lead to a verification error.
+ */
+#define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name)))
+
+#define __bpf_assert_signed(x) _Generic((x), \
+ unsigned long: 0, \
+ unsigned long long: 0, \
+ signed long: 1, \
+ signed long long: 1 \
+)
+
+#define __bpf_assert_check(LHS, op, RHS) \
+ _Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
+ _Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n"); \
+ _Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert"); \
+ _Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression")
+
+#define __bpf_assert(LHS, op, cons, RHS, VAL) \
+ ({ \
+ (void)bpf_throw; \
+ asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw" \
+ : : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : ); \
+ })
+
+#define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign) \
+ ({ \
+ __bpf_assert_check(LHS, op, RHS); \
+ if (__bpf_assert_signed(LHS) && !(supp_sign)) \
+ __bpf_assert(LHS, "s" #op, cons, RHS, VAL); \
+ else \
+ __bpf_assert(LHS, #op, cons, RHS, VAL); \
+ })
+
+#define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign) \
+ ({ \
+ if (sizeof(typeof(RHS)) == 8) { \
+ const typeof(RHS) rhs_var = (RHS); \
+ __bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign); \
+ } else { \
+ __bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign); \
+ } \
+ })
+
+/* Description
+ * Assert that a conditional expression is true.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert(cond) if (!(cond)) bpf_throw(0);
+
+/* Description
+ * Assert that a conditional expression is true.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);
+
+/* Description
+ * Assert that LHS is equal to RHS. This statement updates the known value
+ * of LHS during verification. Note that RHS must be a constant value, and
+ * must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_eq(LHS, RHS) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, ==, RHS, 0, true); \
+ })
+
+/* Description
+ * Assert that LHS is equal to RHS. This statement updates the known value
+ * of LHS during verification. Note that RHS must be a constant value, and
+ * must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_eq_with(LHS, RHS, value) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, ==, RHS, value, true); \
+ })
+
+/* Description
+ * Assert that LHS is less than RHS. This statement updates the known
+ * bounds of LHS during verification. Note that RHS must be a constant
+ * value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_lt(LHS, RHS) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, <, RHS, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is less than RHS. This statement updates the known
+ * bounds of LHS during verification. Note that RHS must be a constant
+ * value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_lt_with(LHS, RHS, value) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, <, RHS, value, false); \
+ })
+
+/* Description
+ * Assert that LHS is greater than RHS. This statement updates the known
+ * bounds of LHS during verification. Note that RHS must be a constant
+ * value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_gt(LHS, RHS) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >, RHS, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is greater than RHS. This statement updates the known
+ * bounds of LHS during verification. Note that RHS must be a constant
+ * value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_gt_with(LHS, RHS, value) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >, RHS, value, false); \
+ })
+
+/* Description
+ * Assert that LHS is less than or equal to RHS. This statement updates the
+ * known bounds of LHS during verification. Note that RHS must be a
+ * constant value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_le(LHS, RHS) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, <=, RHS, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is less than or equal to RHS. This statement updates the
+ * known bounds of LHS during verification. Note that RHS must be a
+ * constant value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_le_with(LHS, RHS, value) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, <=, RHS, value, false); \
+ })
+
+/* Description
+ * Assert that LHS is greater than or equal to RHS. This statement updates
+ * the known bounds of LHS during verification. Note that RHS must be a
+ * constant value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_ge(LHS, RHS) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >=, RHS, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is greater than or equal to RHS. This statement updates
+ * the known bounds of LHS during verification. Note that RHS must be a
+ * constant value, and must fit within the data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_ge_with(LHS, RHS, value) \
+ ({ \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >=, RHS, value, false); \
+ })
+
+/* Description
+ * Assert that LHS is in the range [BEG, END] (inclusive of both). This
+ * statement updates the known bounds of LHS during verification. Note
+ * that both BEG and END must be constant values, and must fit within the
+ * data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_range(LHS, BEG, END) \
+ ({ \
+ _Static_assert(BEG <= END, "BEG must be <= END"); \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >=, BEG, 0, false); \
+ __bpf_assert_op(LHS, <=, END, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is in the range [BEG, END] (inclusive of both). This
+ * statement updates the known bounds of LHS during verification. Note
+ * that both BEG and END must be constant values, and must fit within the
+ * data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_range_with(LHS, BEG, END, value) \
+ ({ \
+ _Static_assert(BEG <= END, "BEG must be <= END"); \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >=, BEG, value, false); \
+ __bpf_assert_op(LHS, <=, END, value, false); \
+ })
+
#endif
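Putting the pieces above together, here is a hedged end-to-end sketch of a throwing program with a custom exception callback; all names are illustrative and not taken from this patch, and the LHS of the assertion is widened to 8 bytes as __bpf_assert_check requires:

    // SPDX-License-Identifier: GPL-2.0
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    /* Global (non-static) exception callback: its return value becomes
     * the program's return value whenever bpf_throw() fires.
     */
    int exc_cb(u64 cookie)
    {
        return cookie;
    }

    SEC("tc")
    __exception_cb(exc_cb)
    int sketch_prog(struct __sk_buff *ctx)
    {
        u64 len = ctx->len;

        /* Unwinds and returns exc_cb(0) for runt frames. */
        bpf_assert_ge(len, 14);
        return 1;
    }

    char _license[] SEC("license") = "GPL";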
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 4e0cdb593318..92d51f377fe5 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -7296,7 +7296,7 @@ static struct btf_dedup_test dedup_tests[] = {
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
- BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */
+ BTF_FUNC_ENC(NAME_NTH(4), 3), /* [4] */
/* tag -> t */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [6] */
@@ -7317,7 +7317,7 @@ static struct btf_dedup_test dedup_tests[] = {
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
- BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */
+ BTF_FUNC_ENC(NAME_NTH(4), 3), /* [4] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [6] */
BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [7] */
diff --git a/tools/testing/selftests/bpf/prog_tests/exceptions.c b/tools/testing/selftests/bpf/prog_tests/exceptions.c
new file mode 100644
index 000000000000..516f4a13013c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/exceptions.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "exceptions.skel.h"
+#include "exceptions_ext.skel.h"
+#include "exceptions_fail.skel.h"
+#include "exceptions_assert.skel.h"
+
+static char log_buf[1024 * 1024];
+
+static void test_exceptions_failure(void)
+{
+ RUN_TESTS(exceptions_fail);
+}
+
+static void test_exceptions_success(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, ropts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct exceptions_ext *eskel = NULL;
+ struct exceptions *skel;
+ int ret;
+
+ skel = exceptions__open();
+ if (!ASSERT_OK_PTR(skel, "exceptions__open"))
+ return;
+
+ ret = exceptions__load(skel);
+ if (!ASSERT_OK(ret, "exceptions__load"))
+ goto done;
+
+ if (!ASSERT_OK(bpf_map_update_elem(bpf_map__fd(skel->maps.jmp_table), &(int){0},
+ &(int){bpf_program__fd(skel->progs.exception_tail_call_target)}, BPF_ANY),
+ "bpf_map_update_elem jmp_table"))
+ goto done;
+
+#define RUN_SUCCESS(_prog, return_val) \
+ if (!test__start_subtest(#_prog)) goto _prog##_##return_val; \
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs._prog), &ropts); \
+ ASSERT_OK(ret, #_prog " prog run ret"); \
+ ASSERT_EQ(ropts.retval, return_val, #_prog " prog run retval"); \
+ _prog##_##return_val:
+
+ RUN_SUCCESS(exception_throw_always_1, 64);
+ RUN_SUCCESS(exception_throw_always_2, 32);
+ RUN_SUCCESS(exception_throw_unwind_1, 16);
+ RUN_SUCCESS(exception_throw_unwind_2, 32);
+ RUN_SUCCESS(exception_throw_default, 0);
+ RUN_SUCCESS(exception_throw_default_value, 5);
+ RUN_SUCCESS(exception_tail_call, 24);
+ RUN_SUCCESS(exception_ext, 0);
+ RUN_SUCCESS(exception_ext_mod_cb_runtime, 35);
+ RUN_SUCCESS(exception_throw_subprog, 1);
+ RUN_SUCCESS(exception_assert_nz_gfunc, 1);
+ RUN_SUCCESS(exception_assert_zero_gfunc, 1);
+ RUN_SUCCESS(exception_assert_neg_gfunc, 1);
+ RUN_SUCCESS(exception_assert_pos_gfunc, 1);
+ RUN_SUCCESS(exception_assert_negeq_gfunc, 1);
+ RUN_SUCCESS(exception_assert_poseq_gfunc, 1);
+ RUN_SUCCESS(exception_assert_nz_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_zero_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_neg_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_pos_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_negeq_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_poseq_gfunc_with, 1);
+ RUN_SUCCESS(exception_bad_assert_nz_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_zero_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_neg_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_pos_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_negeq_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_poseq_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_nz_gfunc_with, 100);
+ RUN_SUCCESS(exception_bad_assert_zero_gfunc_with, 105);
+ RUN_SUCCESS(exception_bad_assert_neg_gfunc_with, 200);
+ RUN_SUCCESS(exception_bad_assert_pos_gfunc_with, 0);
+ RUN_SUCCESS(exception_bad_assert_negeq_gfunc_with, 101);
+ RUN_SUCCESS(exception_bad_assert_poseq_gfunc_with, 99);
+ RUN_SUCCESS(exception_assert_range, 1);
+ RUN_SUCCESS(exception_assert_range_with, 1);
+ RUN_SUCCESS(exception_bad_assert_range, 0);
+ RUN_SUCCESS(exception_bad_assert_range_with, 10);
+
+#define RUN_EXT(load_ret, attach_err, expr, msg, after_link) \
+ { \
+ LIBBPF_OPTS(bpf_object_open_opts, o, .kernel_log_buf = log_buf, \
+ .kernel_log_size = sizeof(log_buf), \
+ .kernel_log_level = 2); \
+ exceptions_ext__destroy(eskel); \
+ eskel = exceptions_ext__open_opts(&o); \
+ struct bpf_program *prog = NULL; \
+ struct bpf_link *link = NULL; \
+ if (!ASSERT_OK_PTR(eskel, "exceptions_ext__open")) \
+ goto done; \
+ (expr); \
+ ASSERT_OK_PTR(bpf_program__name(prog), bpf_program__name(prog)); \
+ if (!ASSERT_EQ(exceptions_ext__load(eskel), load_ret, \
+ "exceptions_ext__load")) { \
+ printf("%s\n", log_buf); \
+ goto done; \
+ } \
+ if (load_ret != 0) { \
+ if (!ASSERT_OK_PTR(strstr(log_buf, msg), "strstr")) { \
+ printf("%s\n", log_buf); \
+ goto done; \
+ } \
+ } \
+ if (!load_ret && attach_err) { \
+ if (!ASSERT_ERR_PTR(link = bpf_program__attach(prog), "attach err")) \
+ goto done; \
+ } else if (!load_ret) { \
+ if (!ASSERT_OK_PTR(link = bpf_program__attach(prog), "attach ok")) \
+ goto done; \
+ (void)(after_link); \
+ bpf_link__destroy(link); \
+ } \
+ }
+
+ if (test__start_subtest("non-throwing fentry -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing fentry -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.throwing_fentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("non-throwing fexit -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing fexit -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.throwing_fexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing extension (with custom cb) -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.throwing_exception_cb_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "Extension programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing extension -> global func in exception_cb"))
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_exception_cb_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod_global"), "set_attach_target"))
+ goto done;
+ }), "", ({ RUN_SUCCESS(exception_ext_mod_cb_runtime, 131); }));
+
+ if (test__start_subtest("throwing extension (with custom cb) -> global func in exception_cb"))
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext),
+ "exception_ext_global"), "set_attach_target"))
+ goto done;
+ }), "", ({ RUN_SUCCESS(exception_ext, 128); }));
+
+ if (test__start_subtest("non-throwing fentry -> non-throwing subprog"))
+ /* non-throwing fentry -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fentry -> non-throwing subprog"))
+ /* throwing fentry -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing fentry -> throwing subprog"))
+ /* non-throwing fentry -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fentry -> throwing subprog"))
+ /* throwing fentry -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing fexit -> non-throwing subprog"))
+ /* non-throwing fexit -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fexit -> non-throwing subprog"))
+ /* throwing fexit -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing fexit -> throwing subprog"))
+ /* non-throwing fexit -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fexit -> throwing subprog"))
+ /* throwing fexit -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ /* fmod_ret is not allowed for subprogs - check it here so we remember to
+ * handle its throwing-specification compatibility with the target once it
+ * is supported.
+ */
+ if (test__start_subtest("non-throwing fmod_ret -> non-throwing subprog"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfmod_ret;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "can't modify return codes of BPF program", 0);
+
+ /* fmod_ret is not allowed for subprogs - check it here so we remember to
+ * handle its throwing-specification compatibility with the target once it
+ * is supported.
+ */
+ if (test__start_subtest("non-throwing fmod_ret -> non-throwing global subprog"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfmod_ret;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "global_subprog"), "set_attach_target"))
+ goto done;
+ }), "can't modify return codes of BPF program", 0);
+
+ if (test__start_subtest("non-throwing extension -> non-throwing subprog"))
+ /* non-throwing extension -> non-throwing subprog : BAD (!global) */
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "subprog() is not a global function", 0);
+
+ if (test__start_subtest("non-throwing extension -> throwing subprog"))
+ /* non-throwing extension -> throwing subprog : BAD (!global) */
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "throwing_subprog() is not a global function", 0);
+
+ if (test__start_subtest("non-throwing extension -> non-throwing subprog"))
+ /* non-throwing extension -> non-throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing extension -> throwing global subprog"))
+ /* non-throwing extension -> throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing extension -> throwing global subprog"))
+ /* throwing extension -> throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing extension -> non-throwing global subprog"))
+ /* throwing extension -> non-throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing extension -> main subprog"))
+ /* non-throwing extension -> main subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "exception_throw_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing extension -> main subprog"))
+ /* throwing extension -> main subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "exception_throw_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+done:
+ exceptions_ext__destroy(eskel);
+ exceptions__destroy(skel);
+}
+
+static void test_exceptions_assertions(void)
+{
+ RUN_TESTS(exceptions_assert);
+}
+
+void test_exceptions(void)
+{
+ test_exceptions_success();
+ test_exceptions_failure();
+ test_exceptions_assertions();
+}
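For orientation, every RUN_EXT case above follows the same libbpf flow: open the extension/tracing object, enable one program, point it at a subprog of the already-loaded target, then load and attach. A minimal sketch of that flow outside the test macros, using names from this diff (tgt_prog_fd is a placeholder for the loaded exceptions skeleton's program fd; error handling elided; freplace programs would attach via bpf_program__attach_freplace() instead):

	struct bpf_object *obj = bpf_object__open_file("exceptions_ext.bpf.o", NULL);
	struct bpf_program *prog = bpf_object__find_program_by_name(obj, "pfentry");
	struct bpf_link *link;

	bpf_program__set_autoload(prog, true);
	/* Bind to a specific subprog of the target before loading. */
	bpf_program__set_attach_target(prog, tgt_prog_fd, "subprog");
	if (!bpf_object__load(obj))
		link = bpf_program__attach_trace(prog);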
diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
index 9d768e083714..97142a4db374 100644
--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
@@ -308,7 +308,7 @@ void test_fill_link_info(void)
return;
/* load kallsyms to compare the addr */
- if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
goto cleanup;
kprobe_addr = ksym_get_addr(KPROBE_FUNC);
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
index 1fbe7e4ac00a..9d03528f05db 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
@@ -4,6 +4,8 @@
#include "trace_helpers.h"
#include "bpf/libbpf_internal.h"
+static struct ksyms *ksyms;
+
static void kprobe_multi_testmod_check(struct kprobe_multi *skel)
{
ASSERT_EQ(skel->bss->kprobe_testmod_test1_result, 1, "kprobe_test1_result");
@@ -50,12 +52,12 @@ static void test_testmod_attach_api_addrs(void)
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
unsigned long long addrs[3];
- addrs[0] = ksym_get_addr("bpf_testmod_fentry_test1");
- ASSERT_NEQ(addrs[0], 0, "ksym_get_addr");
- addrs[1] = ksym_get_addr("bpf_testmod_fentry_test2");
- ASSERT_NEQ(addrs[1], 0, "ksym_get_addr");
- addrs[2] = ksym_get_addr("bpf_testmod_fentry_test3");
- ASSERT_NEQ(addrs[2], 0, "ksym_get_addr");
+ addrs[0] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test1");
+ ASSERT_NEQ(addrs[0], 0, "ksym_get_addr_local");
+ addrs[1] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test2");
+ ASSERT_NEQ(addrs[1], 0, "ksym_get_addr_local");
+ addrs[2] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test3");
+ ASSERT_NEQ(addrs[2], 0, "ksym_get_addr_local");
opts.addrs = (const unsigned long *) addrs;
opts.cnt = ARRAY_SIZE(addrs);
@@ -79,11 +81,15 @@ static void test_testmod_attach_api_syms(void)
void serial_test_kprobe_multi_testmod_test(void)
{
- if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
+ ksyms = load_kallsyms_local();
+ if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
return;
if (test__start_subtest("testmod_attach_api_syms"))
test_testmod_attach_api_syms();
+
if (test__start_subtest("testmod_attach_api_addrs"))
test_testmod_attach_api_addrs();
+
+ free_kallsyms_local(ksyms);
}
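The hunks above migrate this test from the shared kallsyms cache to a private handle, so concurrent users cannot clobber each other's symbol tables. The resulting lifecycle with the trace_helpers.h helpers, sketched with error handling trimmed:

	struct ksyms *ksyms = load_kallsyms_local(); /* private /proc/kallsyms snapshot */
	unsigned long long addr;

	if (!ksyms)
		return;
	addr = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test1");
	/* feed addr into bpf_kprobe_multi_opts, then drop the snapshot */
	free_kallsyms_local(ksyms);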
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
index efb8bd43653c..c440ea3311ed 100644
--- a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
@@ -142,10 +142,14 @@ static void test_libbpf_bpf_map_type_str(void)
/* Special case for map_type_name BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED
* where it and BPF_MAP_TYPE_CGROUP_STORAGE have the same enum value
* (map_type). For this enum value, libbpf_bpf_map_type_str() picks
- * BPF_MAP_TYPE_CGROUP_STORAGE.
+ * BPF_MAP_TYPE_CGROUP_STORAGE. The same applies to
+ * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED and
+ * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE.
*/
if (strcmp(map_type_name, "BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED") == 0)
continue;
+ if (strcmp(map_type_name, "BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED") == 0)
+ continue;
ASSERT_STREQ(buf, map_type_name, "exp_str_value");
}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 18cf7b17463d..db3bf6bbe01a 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -65,8 +65,8 @@ static struct {
{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
- { "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
- { "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
+ { "obj_new_no_composite", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
+ { "obj_new_no_struct", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
{ "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
{ "obj_new_acq", "Unreleased reference id=" },
diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
index c7636e18b1eb..aa9f67eb1c95 100644
--- a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
+++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
@@ -61,6 +61,11 @@ void test_module_fentry_shadow(void)
int link_fd[2] = {};
__s32 btf_id[2] = {};
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
.expected_attach_type = BPF_TRACE_FENTRY,
);
diff --git a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
new file mode 100644
index 000000000000..9541e9b3a034
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "percpu_alloc_array.skel.h"
+#include "percpu_alloc_cgrp_local_storage.skel.h"
+#include "percpu_alloc_fail.skel.h"
+
+static void test_array(void)
+{
+ struct percpu_alloc_array *skel;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = percpu_alloc_array__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.test_array_map_1, true);
+ bpf_program__set_autoload(skel->progs.test_array_map_2, true);
+ bpf_program__set_autoload(skel->progs.test_array_map_3, true);
+ bpf_program__set_autoload(skel->progs.test_array_map_4, true);
+
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ err = percpu_alloc_array__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+ goto out;
+
+ err = percpu_alloc_array__attach(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.test_array_map_1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run array_map 1-4");
+ ASSERT_EQ(topts.retval, 0, "test_run array_map 1-4");
+ ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+ ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+out:
+ percpu_alloc_array__destroy(skel);
+}
+
+static void test_array_sleepable(void)
+{
+ struct percpu_alloc_array *skel;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = percpu_alloc_array__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc__open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.test_array_map_10, true);
+
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ err = percpu_alloc_array__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+ goto out;
+
+ err = percpu_alloc_array__attach(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.test_array_map_10);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run array_map_10");
+ ASSERT_EQ(topts.retval, 0, "test_run array_map_10");
+ ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+ ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+out:
+ percpu_alloc_array__destroy(skel);
+}
+
+static void test_cgrp_local_storage(void)
+{
+ struct percpu_alloc_cgrp_local_storage *skel;
+ int err, cgroup_fd, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ cgroup_fd = test__join_cgroup("/percpu_alloc");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /percpu_alloc"))
+ return;
+
+ skel = percpu_alloc_cgrp_local_storage__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc_cgrp_local_storage__open"))
+ goto close_fd;
+
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ err = percpu_alloc_cgrp_local_storage__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__load"))
+ goto destroy_skel;
+
+ err = percpu_alloc_cgrp_local_storage__attach(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__attach"))
+ goto destroy_skel;
+
+ prog_fd = bpf_program__fd(skel->progs.test_cgrp_local_storage_1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run cgrp_local_storage 1-3");
+ ASSERT_EQ(topts.retval, 0, "test_run cgrp_local_storage 1-3");
+ ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+ ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+
+destroy_skel:
+ percpu_alloc_cgrp_local_storage__destroy(skel);
+close_fd:
+ close(cgroup_fd);
+}
+
+static void test_failure(void)
+{
+ RUN_TESTS(percpu_alloc_fail);
+}
+
+void test_percpu_alloc(void)
+{
+ if (test__start_subtest("array"))
+ test_array();
+ if (test__start_subtest("array_sleepable"))
+ test_array_sleepable();
+ if (test__start_subtest("cgrp_local_storage"))
+ test_cgrp_local_storage();
+ if (test__start_subtest("failure_tests"))
+ test_failure();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c b/tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c
new file mode 100644
index 000000000000..3a2ec3923fca
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <test_progs.h>
+
+#include "preempted_bpf_ma_op.skel.h"
+
+#define ALLOC_THREAD_NR 4
+#define ALLOC_LOOP_NR 512
+
+struct alloc_ctx {
+ /* output */
+ int run_err;
+ /* input */
+ int fd;
+ bool *nomem_err;
+};
+
+static void *run_alloc_prog(void *data)
+{
+ struct alloc_ctx *ctx = data;
+ cpu_set_t cpu_set;
+ int i;
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(0, &cpu_set);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
+
+ for (i = 0; i < ALLOC_LOOP_NR && !*ctx->nomem_err; i++) {
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err;
+
+ err = bpf_prog_test_run_opts(ctx->fd, &topts);
+ ctx->run_err |= err | topts.retval;
+ }
+
+ return NULL;
+}
+
+void test_preempted_bpf_ma_op(void)
+{
+ struct alloc_ctx ctx[ALLOC_THREAD_NR];
+ struct preempted_bpf_ma_op *skel;
+ pthread_t tid[ALLOC_THREAD_NR];
+ int i, err;
+
+ skel = preempted_bpf_ma_op__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ err = preempted_bpf_ma_op__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(ctx); i++) {
+ struct bpf_program *prog;
+ char name[8];
+
+ snprintf(name, sizeof(name), "test%d", i);
+ prog = bpf_object__find_program_by_name(skel->obj, name);
+ if (!ASSERT_OK_PTR(prog, "no test prog"))
+ goto out;
+
+ ctx[i].run_err = 0;
+ ctx[i].fd = bpf_program__fd(prog);
+ ctx[i].nomem_err = &skel->bss->nomem_err;
+ }
+
+ memset(tid, 0, sizeof(tid));
+ for (i = 0; i < ARRAY_SIZE(tid); i++) {
+ err = pthread_create(&tid[i], NULL, run_alloc_prog, &ctx[i]);
+ if (!ASSERT_OK(err, "pthread_create"))
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tid); i++) {
+ if (!tid[i])
+ break;
+ pthread_join(tid[i], NULL);
+ ASSERT_EQ(ctx[i].run_err, 0, "run prog err");
+ }
+
+ ASSERT_FALSE(skel->bss->nomem_err, "ENOMEM");
+out:
+ preempted_bpf_ma_op__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 58fe2c586ed7..fc6b2954e8f5 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -218,12 +218,14 @@ out:
bpf_object__close(obj);
}
-static void test_tailcall_count(const char *which)
+static void test_tailcall_count(const char *which, bool test_fentry,
+ bool test_fexit)
{
+ struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
+ struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
- struct bpf_object *obj;
char buff[128] = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = buff,
@@ -265,23 +267,105 @@ static void test_tailcall_count(const char *which)
if (CHECK_FAIL(err))
goto out;
+ if (test_fentry) {
+ fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
+ if (!ASSERT_OK_PTR(prog, "find fentry prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd,
+ "subprog_tail");
+ if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
+ goto out;
+
+ err = bpf_object__load(fentry_obj);
+ if (!ASSERT_OK(err, "load fentry_obj"))
+ goto out;
+
+ fentry_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
+ goto out;
+ }
+
+ if (test_fexit) {
+ fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
+ if (!ASSERT_OK_PTR(prog, "find fexit prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd,
+ "subprog_tail");
+ if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
+ goto out;
+
+ err = bpf_object__load(fexit_obj);
+ if (!ASSERT_OK(err, "load fexit_obj"))
+ goto out;
+
+ fexit_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
+ goto out;
+ }
+
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "tailcall");
ASSERT_EQ(topts.retval, 1, "tailcall retval");
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "tailcall count");
ASSERT_EQ(val, 33, "tailcall count");
+ if (test_fentry) {
+ data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fentry.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0,
+ "find tailcall_bpf2bpf_fentry.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "fentry count");
+ ASSERT_EQ(val, 33, "fentry count");
+ }
+
+ if (test_fexit) {
+ data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fexit.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0,
+ "find tailcall_bpf2bpf_fexit.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "fexit count");
+ ASSERT_EQ(val, 33, "fexit count");
+ }
+
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
@@ -291,6 +375,10 @@ static void test_tailcall_count(const char *which)
ASSERT_OK(err, "tailcall");
ASSERT_OK(topts.retval, "tailcall retval");
out:
+ bpf_link__destroy(fentry_link);
+ bpf_link__destroy(fexit_link);
+ bpf_object__close(fentry_obj);
+ bpf_object__close(fexit_obj);
bpf_object__close(obj);
}
@@ -299,7 +387,7 @@ out:
*/
static void test_tailcall_3(void)
{
- test_tailcall_count("tailcall3.bpf.o");
+ test_tailcall_count("tailcall3.bpf.o", false, false);
}
/* test_tailcall_6 checks that the count value of the tail call limit
@@ -307,7 +395,7 @@ static void test_tailcall_3(void)
*/
static void test_tailcall_6(void)
{
- test_tailcall_count("tailcall6.bpf.o");
+ test_tailcall_count("tailcall6.bpf.o", false, false);
}
/* test_tailcall_4 checks that the kernel properly selects indirect jump
@@ -352,11 +440,11 @@ static void test_tailcall_4(void)
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
@@ -442,11 +530,11 @@ static void test_tailcall_5(void)
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
@@ -631,11 +719,11 @@ static void test_tailcall_bpf2bpf_2(void)
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
@@ -805,11 +893,11 @@ static void test_tailcall_bpf2bpf_4(bool noise)
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
i = 0;
val.noise = noise;
@@ -872,7 +960,7 @@ static void test_tailcall_bpf2bpf_6(void)
ASSERT_EQ(topts.retval, 0, "tailcall retval");
data_fd = bpf_map__fd(obj->maps.bss);
- if (!ASSERT_GE(map_fd, 0, "bss map fd"))
+ if (!ASSERT_GE(data_fd, 0, "bss map fd"))
goto out;
i = 0;
@@ -884,6 +972,139 @@ out:
tailcall_bpf2bpf6__destroy(obj);
}
+/* test_tailcall_bpf2bpf_fentry checks that the tail call limit is enforced
+ * as expected when the tailcall is preceded by a bpf2bpf call, and the
+ * bpf2bpf call is traced by fentry.
+ */
+static void test_tailcall_bpf2bpf_fentry(void)
+{
+ test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
+}
+
+/* test_tailcall_bpf2bpf_fexit checks that the tail call limit is enforced
+ * as expected when the tailcall is preceded by a bpf2bpf call, and the
+ * bpf2bpf call is traced by fexit.
+ */
+static void test_tailcall_bpf2bpf_fexit(void)
+{
+ test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
+}
+
+/* test_tailcall_bpf2bpf_fentry_fexit checks that the tail call limit is
+ * enforced as expected when the tailcall is preceded by a bpf2bpf call, and
+ * the bpf2bpf call is traced by both fentry and fexit.
+ */
+static void test_tailcall_bpf2bpf_fentry_fexit(void)
+{
+ test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
+}
+
+/* test_tailcall_bpf2bpf_fentry_entry checks that the tail call limit is
+ * enforced as expected when the tailcall is preceded by a bpf2bpf call, and
+ * the bpf2bpf caller is traced by fentry.
+ */
+static void test_tailcall_bpf2bpf_fentry_entry(void)
+{
+ struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
+ int err, map_fd, prog_fd, data_fd, i, val;
+ struct bpf_map *prog_array, *data_map;
+ struct bpf_link *fentry_link = NULL;
+ struct bpf_program *prog;
+ char buff[128] = {};
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
+ BPF_PROG_TYPE_SCHED_CLS,
+ &tgt_obj, &prog_fd);
+ if (!ASSERT_OK(err, "load tgt_obj"))
+ return;
+
+ prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
+ if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
+ if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
+ fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
+ if (!ASSERT_OK_PTR(prog, "find fentry prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
+ if (!ASSERT_OK(err, "set_attach_target classifier_0"))
+ goto out;
+
+ err = bpf_object__load(fentry_obj);
+ if (!ASSERT_OK(err, "load fentry_obj"))
+ goto out;
+
+ fentry_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
+
+ data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "tailcall count");
+ ASSERT_EQ(val, 34, "tailcall count");
+
+ data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fentry.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0,
+ "find tailcall_bpf2bpf_fentry.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "fentry count");
+ ASSERT_EQ(val, 1, "fentry count");
+
+out:
+ bpf_link__destroy(fentry_link);
+ bpf_object__close(fentry_obj);
+ bpf_object__close(tgt_obj);
+}
+
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -910,4 +1131,12 @@ void test_tailcalls(void)
test_tailcall_bpf2bpf_4(true);
if (test__start_subtest("tailcall_bpf2bpf_6"))
test_tailcall_bpf2bpf_6();
+ if (test__start_subtest("tailcall_bpf2bpf_fentry"))
+ test_tailcall_bpf2bpf_fentry();
+ if (test__start_subtest("tailcall_bpf2bpf_fexit"))
+ test_tailcall_bpf2bpf_fexit();
+ if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
+ test_tailcall_bpf2bpf_fentry_fexit();
+ if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
+ test_tailcall_bpf2bpf_fentry_entry();
}
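The new subtests hinge on tracing the bpf2bpf subprog that performs the tail call, so the fentry/fexit invocation counts should match the tail-call count (33 attempts under the limit). tailcall_bpf2bpf2.bpf.o itself is not part of this hunk; a hypothetical sketch of the shape its traced function presumably has (the jmp_table map name is an assumption):

	__noinline
	int subprog_tail(struct __sk_buff *skb)
	{
		/* fentry/subprog_tail and fexit/subprog_tail fire around this */
		bpf_tail_call_static(skb, &jmp_table, 0);
		return 1;
	}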
diff --git a/tools/testing/selftests/bpf/progs/exceptions.c b/tools/testing/selftests/bpf/progs/exceptions.c
new file mode 100644
index 000000000000..2811ee842b01
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+#ifndef ETH_P_IP
+#define ETH_P_IP 0x0800
+#endif
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 4);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static __noinline int static_func(u64 i)
+{
+ bpf_throw(32);
+ return i;
+}
+
+__noinline int global2static_simple(u64 i)
+{
+ static_func(i + 2);
+ return i - 1;
+}
+
+__noinline int global2static(u64 i)
+{
+ if (i == ETH_P_IP)
+ bpf_throw(16);
+ return static_func(i);
+}
+
+static __noinline int static2global(u64 i)
+{
+ return global2static(i) + i;
+}
+
+SEC("tc")
+int exception_throw_always_1(struct __sk_buff *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+/* In this case, the global func will never be seen executing after the call
+ * to the static subprog, hence the verifier will DCE the remaining
+ * instructions. Ensure we are resilient to that.
+ */
+SEC("tc")
+int exception_throw_always_2(struct __sk_buff *ctx)
+{
+ return global2static_simple(ctx->protocol);
+}
+
+SEC("tc")
+int exception_throw_unwind_1(struct __sk_buff *ctx)
+{
+ return static2global(bpf_ntohs(ctx->protocol));
+}
+
+SEC("tc")
+int exception_throw_unwind_2(struct __sk_buff *ctx)
+{
+ return static2global(bpf_ntohs(ctx->protocol) - 1);
+}
+
+SEC("tc")
+int exception_throw_default(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 1;
+}
+
+SEC("tc")
+int exception_throw_default_value(struct __sk_buff *ctx)
+{
+ bpf_throw(5);
+ return 1;
+}
+
+SEC("tc")
+int exception_tail_call_target(struct __sk_buff *ctx)
+{
+ bpf_throw(16);
+ return 0;
+}
+
+static __noinline
+int exception_tail_call_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 10;
+
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return ret;
+}
+
+SEC("tc")
+int exception_tail_call(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ ret = exception_tail_call_subprog(ctx);
+ return ret + 8;
+}
+
+__noinline int exception_ext_global(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ return ret;
+}
+
+static __noinline int exception_ext_static(struct __sk_buff *ctx)
+{
+ return exception_ext_global(ctx);
+}
+
+SEC("tc")
+int exception_ext(struct __sk_buff *ctx)
+{
+ return exception_ext_static(ctx);
+}
+
+__noinline int exception_cb_mod_global(u64 cookie)
+{
+ volatile int ret = 0;
+
+ return ret;
+}
+
+/* Example of how the exception callback supplied during verification can
+ * still introduce extensions by calling dummy global functions, and thereby
+ * alter runtime behavior.
+ *
+ * Right now we don't allow freplace attachment to the exception callback
+ * itself, but if the need arises, relaxing this restriction in the future is
+ * technically feasible.
+ */
+__noinline int exception_cb_mod(u64 cookie)
+{
+ return exception_cb_mod_global(cookie) + cookie + 10;
+}
+
+SEC("tc")
+__exception_cb(exception_cb_mod)
+int exception_ext_mod_cb_runtime(struct __sk_buff *ctx)
+{
+ bpf_throw(25);
+ return 0;
+}
+
+__noinline static int subprog(struct __sk_buff *ctx)
+{
+ return bpf_ktime_get_ns();
+}
+
+__noinline static int throwing_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->tstamp)
+ bpf_throw(0);
+ return bpf_ktime_get_ns();
+}
+
+__noinline int global_subprog(struct __sk_buff *ctx)
+{
+ return bpf_ktime_get_ns();
+}
+
+__noinline int throwing_global_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->tstamp)
+ bpf_throw(0);
+ return bpf_ktime_get_ns();
+}
+
+SEC("tc")
+int exception_throw_subprog(struct __sk_buff *ctx)
+{
+ switch (ctx->protocol) {
+ case 1:
+ return subprog(ctx);
+ case 2:
+ return global_subprog(ctx);
+ case 3:
+ return throwing_subprog(ctx);
+ case 4:
+ return throwing_global_subprog(ctx);
+ default:
+ break;
+ }
+ bpf_throw(1);
+ return 0;
+}
+
+__noinline int assert_nz_gfunc(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert(cookie != 0);
+ return 0;
+}
+
+__noinline int assert_zero_gfunc(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert_eq(cookie, 0);
+ return 0;
+}
+
+__noinline int assert_neg_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_lt(cookie, 0);
+ return 0;
+}
+
+__noinline int assert_pos_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_gt(cookie, 0);
+ return 0;
+}
+
+__noinline int assert_negeq_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_le(cookie, -1);
+ return 0;
+}
+
+__noinline int assert_poseq_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_ge(cookie, 1);
+ return 0;
+}
+
+__noinline int assert_nz_gfunc_with(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert_with(cookie != 0, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_zero_gfunc_with(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert_eq_with(cookie, 0, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_neg_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_lt_with(cookie, 0, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_pos_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_gt_with(cookie, 0, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_negeq_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_le_with(cookie, -1, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_poseq_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_ge_with(cookie, 1, cookie + 100);
+ return 0;
+}
+
+#define check_assert(name, cookie, tag) \
+SEC("tc") \
+int exception##tag##name(struct __sk_buff *ctx) \
+{ \
+ return name(cookie) + 1; \
+}
+
+check_assert(assert_nz_gfunc, 5, _);
+check_assert(assert_zero_gfunc, 0, _);
+check_assert(assert_neg_gfunc, -100, _);
+check_assert(assert_pos_gfunc, 100, _);
+check_assert(assert_negeq_gfunc, -1, _);
+check_assert(assert_poseq_gfunc, 1, _);
+
+check_assert(assert_nz_gfunc_with, 5, _);
+check_assert(assert_zero_gfunc_with, 0, _);
+check_assert(assert_neg_gfunc_with, -100, _);
+check_assert(assert_pos_gfunc_with, 100, _);
+check_assert(assert_negeq_gfunc_with, -1, _);
+check_assert(assert_poseq_gfunc_with, 1, _);
+
+check_assert(assert_nz_gfunc, 0, _bad_);
+check_assert(assert_zero_gfunc, 5, _bad_);
+check_assert(assert_neg_gfunc, 100, _bad_);
+check_assert(assert_pos_gfunc, -100, _bad_);
+check_assert(assert_negeq_gfunc, 1, _bad_);
+check_assert(assert_poseq_gfunc, -1, _bad_);
+
+check_assert(assert_nz_gfunc_with, 0, _bad_);
+check_assert(assert_zero_gfunc_with, 5, _bad_);
+check_assert(assert_neg_gfunc_with, 100, _bad_);
+check_assert(assert_pos_gfunc_with, -100, _bad_);
+check_assert(assert_negeq_gfunc_with, 1, _bad_);
+check_assert(assert_poseq_gfunc_with, -1, _bad_);
+
+SEC("tc")
+int exception_assert_range(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range(time, 0, ~0ULL);
+ return 1;
+}
+
+SEC("tc")
+int exception_assert_range_with(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range_with(time, 0, ~0ULL, 10);
+ return 1;
+}
+
+SEC("tc")
+int exception_bad_assert_range(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range(time, -100, 100);
+ return 1;
+}
+
+SEC("tc")
+int exception_bad_assert_range_with(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range_with(time, -1000, 1000, 10);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
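As the matching prog_tests above verify, a cookie passed to bpf_throw() becomes the program's return value when no custom exception callback is registered. A hedged userspace sketch (skeleton and program names taken from this diff, error handling omitted):

	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct exceptions *skel = exceptions__open_and_load();
	int fd = bpf_program__fd(skel->progs.exception_throw_default_value);

	bpf_prog_test_run_opts(fd, &topts);
	/* bpf_throw(5) with the default exception callback: topts.retval == 5 */
	exceptions__destroy(skel);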
diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
new file mode 100644
index 000000000000..fa35832e6748
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <limits.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+#define check_assert(type, op, name, value) \
+ SEC("?tc") \
+ __log_level(2) __failure \
+ int check_assert_##op##_##name(void *ctx) \
+ { \
+ type num = bpf_ktime_get_ns(); \
+ bpf_assert_##op(num, value); \
+ return *(u64 *)num; \
+ }
+
+__msg(": R0_w=-2147483648 R10=fp0")
+check_assert(s64, eq, int_min, INT_MIN);
+__msg(": R0_w=2147483647 R10=fp0")
+check_assert(s64, eq, int_max, INT_MAX);
+__msg(": R0_w=0 R10=fp0")
+check_assert(s64, eq, zero, 0);
+__msg(": R0_w=-9223372036854775808 R1_w=-9223372036854775808 R10=fp0")
+check_assert(s64, eq, llong_min, LLONG_MIN);
+__msg(": R0_w=9223372036854775807 R1_w=9223372036854775807 R10=fp0")
+check_assert(s64, eq, llong_max, LLONG_MAX);
+
+__msg(": R0_w=scalar(smax=2147483646) R10=fp0")
+check_assert(s64, lt, pos, INT_MAX);
+__msg(": R0_w=scalar(umin=9223372036854775808,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+check_assert(s64, lt, zero, 0);
+__msg(": R0_w=scalar(umin=9223372036854775808,umax=18446744071562067967,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+check_assert(s64, lt, neg, INT_MIN);
+
+__msg(": R0_w=scalar(smax=2147483647) R10=fp0")
+check_assert(s64, le, pos, INT_MAX);
+__msg(": R0_w=scalar(smax=0) R10=fp0")
+check_assert(s64, le, zero, 0);
+__msg(": R0_w=scalar(umin=9223372036854775808,umax=18446744071562067968,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+check_assert(s64, le, neg, INT_MIN);
+
+__msg(": R0_w=scalar(umin=2147483648,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, gt, pos, INT_MAX);
+__msg(": R0_w=scalar(umin=1,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, gt, zero, 0);
+__msg(": R0_w=scalar(smin=-2147483647) R10=fp0")
+check_assert(s64, gt, neg, INT_MIN);
+
+__msg(": R0_w=scalar(umin=2147483647,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, ge, pos, INT_MAX);
+__msg(": R0_w=scalar(umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
+check_assert(s64, ge, zero, 0);
+__msg(": R0_w=scalar(smin=-2147483648) R10=fp0")
+check_assert(s64, ge, neg, INT_MIN);
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R0=0 R1=ctx(off=0,imm=0) R2=scalar(smin=-2147483646,smax=2147483645) R10=fp0")
+int check_assert_range_s64(struct __sk_buff *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ s64 num;
+
+ _Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
+ if (!sk)
+ return 0;
+ num = sk->rx_queue_mapping;
+ bpf_assert_range(num, INT_MIN + 2, INT_MAX - 2);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=ctx(off=0,imm=0) R2=scalar(umin=4096,umax=8192,var_off=(0x0; 0x3fff))")
+int check_assert_range_u64(struct __sk_buff *ctx)
+{
+ u64 num = ctx->len;
+
+ bpf_assert_range(num, 4096, 8192);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R0=0 R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
+int check_assert_single_range_s64(struct __sk_buff *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ s64 num;
+
+ _Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
+ if (!sk)
+ return 0;
+ num = sk->rx_queue_mapping;
+
+ bpf_assert_range(num, 4096, 4096);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
+int check_assert_single_range_u64(struct __sk_buff *ctx)
+{
+ u64 num = ctx->len;
+
+ bpf_assert_range(num, 4096, 4096);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=pkt(off=64,r=64,imm=0) R2=pkt_end(off=0,imm=0) R6=pkt(off=0,r=64,imm=0) R10=fp0")
+int check_assert_generic(struct __sk_buff *ctx)
+{
+ u8 *data_end = (void *)(long)ctx->data_end;
+ u8 *data = (void *)(long)ctx->data;
+
+ bpf_assert(data + 64 <= data_end);
+ return data[128];
+}
+
+SEC("?fentry/bpf_check")
+__failure __msg("At program exit the register R0 has value (0x40; 0x0)")
+int check_assert_with_return(void *ctx)
+{
+ bpf_assert_with(!ctx, 64);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
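The __msg() expectations above work because each assertion throws on failure, letting the verifier refine the register's range on the fallthrough path. An approximate expansion of the macros, for intuition only (the exact definitions live in bpf_experimental.h and may differ):

	#define bpf_assert(cond)              if (!(cond)) bpf_throw(0);
	#define bpf_assert_with(cond, value)  if (!(cond)) bpf_throw(value);
	/* bpf_assert_eq/lt/le/gt/ge(a, b) compare and throw the same way */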
diff --git a/tools/testing/selftests/bpf/progs/exceptions_ext.c b/tools/testing/selftests/bpf/progs/exceptions_ext.c
new file mode 100644
index 000000000000..743c05185d9b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_ext.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+SEC("?fentry")
+int pfentry(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fentry")
+int throwing_fentry(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline int exception_cb(u64 cookie)
+{
+ return cookie + 64;
+}
+
+SEC("?freplace")
+int extension(struct __sk_buff *ctx)
+{
+ return 0;
+}
+
+SEC("?freplace")
+__exception_cb(exception_cb)
+int throwing_exception_cb_extension(u64 cookie)
+{
+ bpf_throw(32);
+ return 0;
+}
+
+SEC("?freplace")
+__exception_cb(exception_cb)
+int throwing_extension(struct __sk_buff *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+SEC("?fexit")
+int pfexit(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fexit")
+int throwing_fexit(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?fmod_ret")
+int pfmod_ret(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fmod_ret")
+int throwing_fmod_ret(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
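The __exception_cb(name) annotation used throughout this file is assumed to be a BTF decl tag on the main program that names its exception callback, roughly:

	/* Assumed shape of the bpf_experimental.h definition: */
	#define __exception_cb(name) \
		__attribute__((btf_decl_tag("exception_callback:" #name)))

The verifier resolves the tagged subprog and type-checks it, which is what the reject_exception_cb_* cases in exceptions_fail.c below exercise.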
diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c
new file mode 100644
index 000000000000..4c39e920dac2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern void bpf_rcu_read_lock(void) __ksym;
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+struct foo {
+ struct bpf_rb_node node;
+};
+
+struct hmap_elem {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 64);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} hmap SEC(".maps");
+
+private(A) struct bpf_spin_lock lock;
+private(A) struct bpf_rb_root rbtree __contains(foo, node);
+
+__noinline void *exception_cb_bad_ret_type(u64 cookie)
+{
+ return NULL;
+}
+
+__noinline int exception_cb_bad_arg_0(void)
+{
+ return 0;
+}
+
+__noinline int exception_cb_bad_arg_2(int a, int b)
+{
+ return 0;
+}
+
+__noinline int exception_cb_ok_arg_small(int a)
+{
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_ret_type)
+__failure __msg("Global function exception_cb_bad_ret_type() doesn't return scalar.")
+int reject_exception_cb_type_1(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_arg_0)
+__failure __msg("exception cb only supports single integer argument")
+int reject_exception_cb_type_2(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_arg_2)
+__failure __msg("exception cb only supports single integer argument")
+int reject_exception_cb_type_3(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_ok_arg_small)
+__success
+int reject_exception_cb_type_4(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline
+static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback subprog")
+int reject_async_callback_throw(struct __sk_buff *ctx)
+{
+ struct hmap_elem *elem;
+
+ elem = bpf_map_lookup_elem(&hmap, &(int){0});
+ if (!elem)
+ return 0;
+ return bpf_timer_set_callback(&elem->timer, timer_cb);
+}
+
+__noinline static int subprog_lock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_spin_lock(&lock);
+ if (ctx->len)
+ bpf_throw(0);
+ return ret;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_with_lock(void *ctx)
+{
+ bpf_spin_lock(&lock);
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_subprog_with_lock(void *ctx)
+{
+ return subprog_lock(ctx);
+}
+
+SEC("?tc")
+__failure __msg("bpf_rcu_read_unlock is missing")
+int reject_with_rcu_read_lock(void *ctx)
+{
+ bpf_rcu_read_lock();
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int throwing_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->len)
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_rcu_read_unlock is missing")
+int reject_subprog_with_rcu_read_lock(void *ctx)
+{
+ bpf_rcu_read_lock();
+ return throwing_subprog(ctx);
+}
+
+static bool rbless(struct bpf_rb_node *n1, const struct bpf_rb_node *n2)
+{
+ bpf_throw(0);
+ return true;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_with_rbtree_add_throw(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&rbtree, &f->node, rbless);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_reference(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int subprog_ref(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int subprog_cb_ref(u32 i, void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_cb_reference(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_loop(5, subprog_cb_ref, NULL, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_with_cb(void *ctx)
+{
+ bpf_loop(5, subprog_cb_ref, NULL, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_subprog_reference(void *ctx)
+{
+ return subprog_ref(ctx) + 1;
+}
+
+__noinline int throwing_exception_cb(u64 c)
+{
+ bpf_throw(0);
+ return c;
+}
+
+__noinline int exception_cb1(u64 c)
+{
+ return c;
+}
+
+__noinline int exception_cb2(u64 c)
+{
+ return c;
+}
+
+static __noinline int static_func(struct __sk_buff *ctx)
+{
+ return exception_cb1(ctx->tstamp);
+}
+
+__noinline int global_func(struct __sk_buff *ctx)
+{
+ return exception_cb1(ctx->tstamp);
+}
+
+SEC("?tc")
+__exception_cb(throwing_exception_cb)
+__failure __msg("cannot be called from callback subprog")
+int reject_throwing_exception_cb(struct __sk_buff *ctx)
+{
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__failure __msg("cannot call exception cb directly")
+int reject_exception_cb_call_global_func(struct __sk_buff *ctx)
+{
+ return global_func(ctx);
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__failure __msg("cannot call exception cb directly")
+int reject_exception_cb_call_static_func(struct __sk_buff *ctx)
+{
+ return static_func(ctx);
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__exception_cb(exception_cb2)
+__failure __msg("multiple exception callback tags for main subprog")
+int reject_multiple_exception_cb(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 16;
+}
+
+__noinline int exception_cb_bad_ret(u64 c)
+{
+ return c;
+}
+
+SEC("?fentry/bpf_check")
+__exception_cb(exception_cb_bad_ret)
+__failure __msg("At program exit the register R0 has unknown scalar value should")
+int reject_set_exception_cb_bad_ret1(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fentry/bpf_check")
+__failure __msg("At program exit the register R0 has value (0x40; 0x0) should")
+int reject_set_exception_cb_bad_ret2(void *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+__noinline static int loop_cb1(u32 index, int *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int loop_cb2(u32 index, int *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_exception_throw_cb(struct __sk_buff *ctx)
+{
+ bpf_loop(5, loop_cb1, NULL, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_exception_throw_cb_diff(struct __sk_buff *ctx)
+{
+ if (ctx->protocol)
+ bpf_loop(5, loop_cb1, NULL, 0);
+ else
+ bpf_loop(5, loop_cb2, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_array.c b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
new file mode 100644
index 000000000000..bbc45346e006
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
@@ -0,0 +1,183 @@
+#include "bpf_experimental.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test_array_map_1)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+/* Update percpu data */
+SEC("?fentry/bpf_fentry_test2")
+int BPF_PROG(test_array_map_2)
+{
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ return 0;
+ v->c = 1;
+ v->d = 2;
+
+ return 0;
+}
+
+int cpu0_field_d, sum_field_c;
+
+/* Summarize percpu data */
+SEC("?fentry/bpf_fentry_test3")
+int BPF_PROG(test_array_map_3)
+{
+ struct val_t __percpu_kptr *p;
+ int i, index = 0;
+ struct val_t *v;
+ struct elem *e;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ return 0;
+}
+
+/* Explicitly free allocated percpu data */
+SEC("?fentry/bpf_fentry_test4")
+int BPF_PROG(test_array_map_4)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ /* delete */
+ p = bpf_kptr_xchg(&e->pc, NULL);
+ if (p) {
+ bpf_percpu_obj_drop(p);
+ }
+
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+int BPF_PROG(test_array_map_10)
+{
+ struct val_t __percpu_kptr *p, *p1;
+ int i, index = 0;
+ struct val_t *v;
+ struct elem *e;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ bpf_rcu_read_lock();
+ p = e->pc;
+ if (!p) {
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ goto out;
+
+ p1 = bpf_kptr_xchg(&e->pc, p);
+ if (p1) {
+ /* race condition */
+ bpf_percpu_obj_drop(p1);
+ }
+ }
+
+ v = bpf_this_cpu_ptr(p);
+ v->c = 3;
+ v = bpf_this_cpu_ptr(p);
+ v->c = 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ goto out;
+ v->c = 1;
+ v->d = 2;
+
+ /* delete */
+ p1 = bpf_kptr_xchg(&e->pc, NULL);
+ if (!p1)
+ goto out;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ /* finally release p */
+ bpf_percpu_obj_drop(p1);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
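The __percpu_kptr field annotation marks a map-value slot as holding a bpf_percpu_obj_new() allocation; it is assumed to be the usual BTF type tag, mirroring __kptr in bpf_helpers.h:

	/* Assumed definition, by analogy with __kptr: */
	#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))

The tag is how the verifier type-checks bpf_kptr_xchg() against the declared struct, which percpu_alloc_fail.c's test_array_map_2 below exercises.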
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
new file mode 100644
index 000000000000..1c36a241852c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
@@ -0,0 +1,105 @@
+#include "bpf_experimental.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct elem);
+} cgrp SEC(".maps");
+
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test_cgrp_local_storage_1)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+/* Percpu data collection */
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG(test_cgrp_local_storage_2)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ return 0;
+ v->c = 1;
+ v->d = 2;
+ return 0;
+}
+
+int cpu0_field_d, sum_field_c;
+
+/* Summarize percpu data collection */
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG(test_cgrp_local_storage_3)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+ int i;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
new file mode 100644
index 000000000000..1a891d30f1fe
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
@@ -0,0 +1,164 @@
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct val2_t {
+ long b;
+};
+
+struct val_with_ptr_t {
+ char *p;
+};
+
+struct val_with_rb_root_t {
+ struct bpf_spin_lock lock;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+long ret;
+
+SEC("?fentry/bpf_fentry_test1")
+__failure __msg("store to referenced kptr disallowed")
+int BPF_PROG(test_array_map_1)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ e->pc = (struct val_t __percpu_kptr *)ret;
+ return 0;
+}
+
+SEC("?fentry/bpf_fentry_test1")
+__failure __msg("invalid kptr access, R2 type=percpu_ptr_val2_t expected=ptr_val_t")
+int BPF_PROG(test_array_map_2)
+{
+ struct val2_t __percpu_kptr *p2;
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p2 = bpf_percpu_obj_new(struct val2_t);
+ if (!p2)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p2);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("R1 type=scalar expected=percpu_ptr_, percpu_rcu_ptr_, percpu_trusted_ptr_")
+int BPF_PROG(test_array_map_3)
+{
+ struct val_t __percpu_kptr *p, *p1;
+ struct val_t *v;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p1 = bpf_kptr_xchg(&e->pc, p);
+ if (p1)
+ bpf_percpu_obj_drop(p1);
+
+ v = bpf_this_cpu_ptr(p);
+ ret = v->b;
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("arg#0 expected for bpf_percpu_obj_drop_impl()")
+int BPF_PROG(test_array_map_4)
+{
+ struct val_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ bpf_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("arg#0 expected for bpf_obj_drop_impl()")
+int BPF_PROG(test_array_map_5)
+{
+ struct val_t *p;
+
+ p = bpf_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type ID argument must be of a struct of scalars")
+int BPF_PROG(test_array_map_6)
+{
+ struct val_with_ptr_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_with_ptr_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type ID argument must not contain special fields")
+int BPF_PROG(test_array_map_7)
+{
+ struct val_with_rb_root_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_with_rb_root_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c b/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c
new file mode 100644
index 000000000000..55907ef961bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+
+struct bin_data {
+ char data[256];
+ struct bpf_spin_lock lock;
+};
+
+struct map_value {
+ struct bin_data __kptr *data;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2048);
+} array SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+bool nomem_err = false;
+
+static int del_array(unsigned int i, int *from)
+{
+ struct map_value *value;
+ struct bin_data *old;
+
+ value = bpf_map_lookup_elem(&array, from);
+ if (!value)
+ return 1;
+
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (old)
+ bpf_obj_drop(old);
+
+ (*from)++;
+ return 0;
+}
+
+static int add_array(unsigned int i, int *from)
+{
+ struct bin_data *old, *new;
+ struct map_value *value;
+
+ value = bpf_map_lookup_elem(&array, from);
+ if (!value)
+ return 1;
+
+ new = bpf_obj_new(typeof(*new));
+ if (!new) {
+ nomem_err = true;
+ return 1;
+ }
+
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old)
+ bpf_obj_drop(old);
+
+ (*from)++;
+ return 0;
+}
+
+static void del_then_add_array(int from)
+{
+ int i;
+
+ i = from;
+ bpf_loop(512, del_array, &i, 0);
+
+ i = from;
+ bpf_loop(512, add_array, &i, 0);
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG2(test0, int, a)
+{
+ del_then_add_array(0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG2(test1, int, a, u64, b)
+{
+ del_then_add_array(512);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG2(test2, char, a, int, b, u64, c)
+{
+ del_then_add_array(1024);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test4")
+int BPF_PROG2(test3, void *, a, char, b, int, c, u64, d)
+{
+ del_then_add_array(1536);
+ return 0;
+}
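The file above contains only the BPF side; driving it requires user space to attach the skeleton and invoke the kernel's bpf_fentry_test* functions via BPF_PROG_TEST_RUN. A hypothetical harness sketch follows — the skeleton header name and ASSERT helpers follow selftest conventions, and the real runner in this series may instead spawn concurrent pinned threads to actually provoke preemption of the bpf_mem_alloc paths:

#include <test_progs.h>
#include "preempted_bpf_ma_op.skel.h"

void test_preempted_bpf_ma_op(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct preempted_bpf_ma_op *skel;
	int err;

	skel = preempted_bpf_ma_op__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	err = preempted_bpf_ma_op__attach(skel);
	if (!ASSERT_OK(err, "attach"))
		goto out;

	/* Running a fentry prog with test_run invokes the kernel's
	 * bpf_fentry_test* functions, firing the attached programs so
	 * they race on the shared array map.
	 */
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test0), &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_FALSE(skel->bss->nomem_err, "no ENOMEM");
out:
	preempted_bpf_ma_op__destroy(skel);
}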
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c
new file mode 100644
index 000000000000..8436c6729167
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int count = 0;
+
+SEC("fentry/subprog_tail")
+int BPF_PROG(fentry, struct sk_buff *skb)
+{
+ count++;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c
new file mode 100644
index 000000000000..fe16412c6e6e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int count = 0;
+
+SEC("fexit/subprog_tail")
+int BPF_PROG(fexit, struct sk_buff *skb)
+{
+ count++;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
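Both tracer programs above target subprog_tail, a BPF subprogram rather than a kernel function, so the loader must pin the attach target to the tail-call program's fd before load. A sketch of that wiring (the "entry" program name is an assumption for illustration):

#include <errno.h>
#include <bpf/libbpf.h>

static int attach_to_subprog(struct bpf_object *tracee_obj,
			     const char *tracer_path)
{
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct bpf_link *link;
	int tgt_fd, err;

	/* fd of the program containing the subprog_tail() subprogram */
	tgt_fd = bpf_program__fd(bpf_object__find_program_by_name(tracee_obj,
								  "entry"));

	obj = bpf_object__open(tracer_path);
	if (!obj)
		return -errno;

	prog = bpf_object__find_program_by_name(obj, "fentry");
	err = bpf_program__set_attach_target(prog, tgt_fd, "subprog_tail");
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err)
		return err;

	link = bpf_program__attach_trace(prog);
	return link ? 0 : -errno;
}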
diff --git a/tools/testing/selftests/bpf/progs/verifier_bswap.c b/tools/testing/selftests/bpf/progs/verifier_bswap.c
index 8893094725f0..5d54f8eae6a1 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bswap.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bswap.c
@@ -5,7 +5,8 @@
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+ __clang_major__ >= 18
SEC("socket")
__description("BSWAP, 16")
diff --git a/tools/testing/selftests/bpf/progs/verifier_gotol.c b/tools/testing/selftests/bpf/progs/verifier_gotol.c
index 2dae5322a18e..aa54ecd5829e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_gotol.c
+++ b/tools/testing/selftests/bpf/progs/verifier_gotol.c
@@ -5,7 +5,8 @@
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+ __clang_major__ >= 18
SEC("socket")
__description("gotol, small_imm")
diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
index 0c638f45aaf1..1e1bc379c44f 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -5,7 +5,8 @@
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+ __clang_major__ >= 18
SEC("socket")
__description("LDSX, S8")
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index 3c8ac2c57b1b..ca11fd5dafd1 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -5,7 +5,8 @@
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+ __clang_major__ >= 18
SEC("socket")
__description("MOV32SX, S8")
diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
index 0990f8825675..fb039722b639 100644
--- a/tools/testing/selftests/bpf/progs/verifier_sdiv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
@@ -5,7 +5,8 @@
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+ __clang_major__ >= 18
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 1")
diff --git a/tools/testing/selftests/bpf/test_bpftool_synctypes.py b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
index 0cfece7ff4f8..0ed67b6b31dd 100755
--- a/tools/testing/selftests/bpf/test_bpftool_synctypes.py
+++ b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
@@ -509,6 +509,15 @@ def main():
source_map_types.remove('cgroup_storage_deprecated')
source_map_types.add('cgroup_storage')
+ # The same applies to BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED and
+ # BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, which share the same enum value;
+ # source_map_types picks
+ # BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED/percpu_cgroup_storage_deprecated.
+ # Replace 'percpu_cgroup_storage_deprecated' with 'percpu_cgroup_storage'
+ # so it aligns with what `bpftool map help` shows.
+ source_map_types.remove('percpu_cgroup_storage_deprecated')
+ source_map_types.add('percpu_cgroup_storage')
+
help_map_types = map_info.get_map_help()
help_map_options = map_info.get_options()
map_info.close()
diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh
index 2aa5a3445056..65aafe0003db 100755
--- a/tools/testing/selftests/bpf/test_xsk.sh
+++ b/tools/testing/selftests/bpf/test_xsk.sh
@@ -73,17 +73,33 @@
#
# Run test suite for physical device in loopback mode
# sudo ./test_xsk.sh -i IFACE
+#
+# Run test suite in a specific mode only [skb,drv,zc]
+# sudo ./test_xsk.sh -m MODE
+#
+# List available tests
+# ./test_xsk.sh -l
+#
+# Run a specific test from the test suite
+# sudo ./test_xsk.sh -t TEST_NAME
+#
+# Display the available command line options
+# ./test_xsk.sh -h
. xsk_prereqs.sh
ETH=""
-while getopts "vi:d" flag
+while getopts "vi:dm:lt:h" flag
do
case "${flag}" in
v) verbose=1;;
d) debug=1;;
i) ETH=${OPTARG};;
+ m) MODE=${OPTARG};;
+ l) list=1;;
+ t) TEST=${OPTARG};;
+ h) help=1;;
esac
done
@@ -131,6 +147,16 @@ setup_vethPairs() {
ip link set ${VETH0} up
}
+if [[ $list -eq 1 ]]; then
+ ./${XSKOBJ} -l
+ exit
+fi
+
+if [[ $help -eq 1 ]]; then
+ ./${XSKOBJ}
+ exit
+fi
+
if [ ! -z $ETH ]; then
VETH0=${ETH}
VETH1=${ETH}
@@ -153,6 +179,14 @@ if [[ $verbose -eq 1 ]]; then
ARGS+="-v "
fi
+if [ -n "$MODE" ]; then
+ ARGS+="-m ${MODE} "
+fi
+
+if [ -n "$TEST" ]; then
+ ARGS+="-t ${TEST} "
+fi
+
retval=$?
test_status $retval "${TEST_NAME}"
@@ -175,6 +209,10 @@ else
cleanup_iface ${ETH} ${MTU}
fi
+if [[ $list -eq 1 ]]; then
+ exit
+fi
+
TEST_NAME="XSK_SELFTESTS_${VETH0}_BUSY_POLL"
busy_poll=1
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index f83d9f65c65b..4faa898ff7fc 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -7,6 +7,7 @@
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
+#include <pthread.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/mman.h>
@@ -14,104 +15,165 @@
#include <linux/limits.h>
#include <libelf.h>
#include <gelf.h>
+#include "bpf/libbpf_internal.h"
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
-#define MAX_SYMS 400000
-static struct ksym syms[MAX_SYMS];
-static int sym_cnt;
+struct ksyms {
+ struct ksym *syms;
+ size_t sym_cap;
+ size_t sym_cnt;
+};
+
+static struct ksyms *ksyms;
+static pthread_mutex_t ksyms_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static int ksyms__add_symbol(struct ksyms *ksyms, const char *name,
+ unsigned long addr)
+{
+ void *tmp;
+
+ tmp = strdup(name);
+ if (!tmp)
+ return -ENOMEM;
+ ksyms->syms[ksyms->sym_cnt].addr = addr;
+ ksyms->syms[ksyms->sym_cnt].name = tmp;
+ ksyms->sym_cnt++;
+ return 0;
+}
+
+void free_kallsyms_local(struct ksyms *ksyms)
+{
+ unsigned int i;
+
+ if (!ksyms)
+ return;
+
+ if (!ksyms->syms) {
+ free(ksyms);
+ return;
+ }
+
+ for (i = 0; i < ksyms->sym_cnt; i++)
+ free(ksyms->syms[i].name);
+ free(ksyms->syms);
+ free(ksyms);
+}
static int ksym_cmp(const void *p1, const void *p2)
{
return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}
-int load_kallsyms_refresh(void)
+struct ksyms *load_kallsyms_local(void)
{
FILE *f;
char func[256], buf[256];
char symbol;
void *addr;
- int i = 0;
-
- sym_cnt = 0;
+ int ret;
+ struct ksyms *ksyms;
f = fopen("/proc/kallsyms", "r");
if (!f)
- return -ENOENT;
+ return NULL;
+
+ ksyms = calloc(1, sizeof(struct ksyms));
+ if (!ksyms) {
+ fclose(f);
+ return NULL;
+ }
while (fgets(buf, sizeof(buf), f)) {
if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
break;
if (!addr)
continue;
- if (i >= MAX_SYMS)
- return -EFBIG;
- syms[i].addr = (long) addr;
- syms[i].name = strdup(func);
- i++;
+ ret = libbpf_ensure_mem((void **) &ksyms->syms, &ksyms->sym_cap,
+ sizeof(struct ksym), ksyms->sym_cnt + 1);
+ if (ret)
+ goto error;
+ ret = ksyms__add_symbol(ksyms, func, (unsigned long)addr);
+ if (ret)
+ goto error;
}
fclose(f);
- sym_cnt = i;
- qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
- return 0;
+ qsort(ksyms->syms, ksyms->sym_cnt, sizeof(struct ksym), ksym_cmp);
+ return ksyms;
+
+error:
+ fclose(f);
+ free_kallsyms_local(ksyms);
+ return NULL;
}
int load_kallsyms(void)
{
- /*
- * This is called/used from multiplace places,
- * load symbols just once.
- */
- if (sym_cnt)
- return 0;
- return load_kallsyms_refresh();
+ pthread_mutex_lock(&ksyms_mutex);
+ if (!ksyms)
+ ksyms = load_kallsyms_local();
+ pthread_mutex_unlock(&ksyms_mutex);
+ return ksyms ? 0 : 1;
}
-struct ksym *ksym_search(long key)
+struct ksym *ksym_search_local(struct ksyms *ksyms, long key)
{
- int start = 0, end = sym_cnt;
+ int start = 0, end = ksyms->sym_cnt;
int result;
/* kallsyms not loaded. return NULL */
- if (sym_cnt <= 0)
+ if (ksyms->sym_cnt <= 0)
return NULL;
while (start < end) {
size_t mid = start + (end - start) / 2;
- result = key - syms[mid].addr;
+ result = key - ksyms->syms[mid].addr;
if (result < 0)
end = mid;
else if (result > 0)
start = mid + 1;
else
- return &syms[mid];
+ return &ksyms->syms[mid];
}
- if (start >= 1 && syms[start - 1].addr < key &&
- key < syms[start].addr)
+ if (start >= 1 && ksyms->syms[start - 1].addr < key &&
+ key < ksyms->syms[start].addr)
/* valid ksym */
- return &syms[start - 1];
+ return &ksyms->syms[start - 1];
/* out of range. return _stext */
- return &syms[0];
+ return &ksyms->syms[0];
}
-long ksym_get_addr(const char *name)
+struct ksym *ksym_search(long key)
+{
+ if (!ksyms)
+ return NULL;
+ return ksym_search_local(ksyms, key);
+}
+
+long ksym_get_addr_local(struct ksyms *ksyms, const char *name)
{
int i;
- for (i = 0; i < sym_cnt; i++) {
- if (strcmp(syms[i].name, name) == 0)
- return syms[i].addr;
+ for (i = 0; i < ksyms->sym_cnt; i++) {
+ if (strcmp(ksyms->syms[i].name, name) == 0)
+ return ksyms->syms[i].addr;
}
return 0;
}
+long ksym_get_addr(const char *name)
+{
+ if (!ksyms)
+ return 0;
+ return ksym_get_addr_local(ksyms, name);
+}
+
/* open kallsyms and read symbol addresses on the fly. Without caching all symbols,
* this is faster than load + find.
*/
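The header changes below spell out the new reentrant API; as a usage sketch (hypothetical caller, not part of the patch), each test can now hold a private symbol table instead of sharing one static array:

static int resolve_example(void)
{
	struct ksyms *ksyms;
	struct ksym *sym;
	long addr;

	ksyms = load_kallsyms_local();
	if (!ksyms)
		return -1;

	/* Forward lookup by name, then reverse lookup by address. */
	addr = ksym_get_addr_local(ksyms, "schedule");
	sym = ksym_search_local(ksyms, addr);
	if (sym)
		printf("%lx -> %s\n", addr, sym->name);

	free_kallsyms_local(ksyms);
	return 0;
}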
diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h
index 876f3e711df6..04fd1da7079d 100644
--- a/tools/testing/selftests/bpf/trace_helpers.h
+++ b/tools/testing/selftests/bpf/trace_helpers.h
@@ -11,13 +11,17 @@ struct ksym {
long addr;
char *name;
};
+struct ksyms;
int load_kallsyms(void);
-int load_kallsyms_refresh(void);
-
struct ksym *ksym_search(long key);
long ksym_get_addr(const char *name);
+struct ksyms *load_kallsyms_local(void);
+struct ksym *ksym_search_local(struct ksyms *ksyms, long key);
+long ksym_get_addr_local(struct ksyms *ksyms, const char *name);
+void free_kallsyms_local(struct ksyms *ksyms);
+
/* open kallsyms and find addresses on the fly, faster than load + search. */
int kallsyms_find(const char *sym, unsigned long long *addr);
diff --git a/tools/testing/selftests/bpf/xsk_prereqs.sh b/tools/testing/selftests/bpf/xsk_prereqs.sh
index 29175682c44d..47c7b8064f38 100755
--- a/tools/testing/selftests/bpf/xsk_prereqs.sh
+++ b/tools/testing/selftests/bpf/xsk_prereqs.sh
@@ -83,9 +83,11 @@ exec_xskxceiver()
fi
./${XSKOBJ} -i ${VETH0} -i ${VETH1} ${ARGS}
-
retval=$?
- test_status $retval "${TEST_NAME}"
- statusList+=($retval)
- nameList+=(${TEST_NAME})
+
+ if [[ $list -ne 1 ]]; then
+ test_status $retval "${TEST_NAME}"
+ statusList+=($retval)
+ nameList+=(${TEST_NAME})
+ fi
}
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 2827f2d7cf30..43e0a5796929 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -107,6 +107,11 @@
static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
+static bool opt_verbose;
+static bool opt_print_tests;
+static enum test_mode opt_mode = TEST_MODE_ALL;
+static u32 opt_run_test = RUN_ALL_TESTS;
+
static void __exit_with_error(int error, const char *file, const char *func, int line)
{
ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
@@ -310,19 +315,28 @@ static struct option long_options[] = {
{"interface", required_argument, 0, 'i'},
{"busy-poll", no_argument, 0, 'b'},
{"verbose", no_argument, 0, 'v'},
+ {"mode", required_argument, 0, 'm'},
+ {"list", no_argument, 0, 'l'},
+ {"test", required_argument, 0, 't'},
+ {"help", no_argument, 0, 'h'},
{0, 0, 0, 0}
};
-static void usage(const char *prog)
+static void print_usage(char **argv)
{
const char *str =
- " Usage: %s [OPTIONS]\n"
+ " Usage: xskxceiver [OPTIONS]\n"
" Options:\n"
" -i, --interface Use interface\n"
" -v, --verbose Verbose output\n"
- " -b, --busy-poll Enable busy poll\n";
+ " -b, --busy-poll Enable busy poll\n"
+ " -m, --mode Run only mode skb, drv, or zc\n"
+ " -l, --list List all available tests\n"
+ " -t, --test Run a specific test. Enter number from -l option.\n"
+ " -h, --help Display this help and exit\n";
- ksft_print_msg(str, prog);
+ ksft_print_msg(str, basename(argv[0]));
+ ksft_exit_xfail();
}
static bool validate_interface(struct ifobject *ifobj)
@@ -342,7 +356,7 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
opterr = 0;
for (;;) {
- c = getopt_long(argc, argv, "i:vb", long_options, &option_index);
+ c = getopt_long(argc, argv, "i:vbm:lt:", long_options, &option_index);
if (c == -1)
break;
@@ -371,9 +385,28 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
ifobj_tx->busy_poll = true;
ifobj_rx->busy_poll = true;
break;
+ case 'm':
+ if (!strncmp("skb", optarg, strlen(optarg)))
+ opt_mode = TEST_MODE_SKB;
+ else if (!strncmp("drv", optarg, strlen(optarg)))
+ opt_mode = TEST_MODE_DRV;
+ else if (!strncmp("zc", optarg, strlen(optarg)))
+ opt_mode = TEST_MODE_ZC;
+ else
+ print_usage(argv);
+ break;
+ case 'l':
+ opt_print_tests = true;
+ break;
+ case 't':
+ errno = 0;
+ opt_run_test = strtol(optarg, NULL, 0);
+ if (errno)
+ print_usage(argv);
+ break;
+ case 'h':
default:
- usage(basename(argv[0]));
- ksft_exit_xfail();
+ print_usage(argv);
}
}
}
@@ -427,7 +460,8 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
}
static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
- struct ifobject *ifobj_rx, enum test_mode mode)
+ struct ifobject *ifobj_rx, enum test_mode mode,
+ const struct test_spec *test_to_run)
{
struct pkt_stream *tx_pkt_stream;
struct pkt_stream *rx_pkt_stream;
@@ -449,6 +483,8 @@ static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
ifobj->bind_flags |= XDP_COPY;
}
+ strncpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE);
+ test->test_func = test_to_run->test_func;
test->mode = mode;
__test_spec_init(test, ifobj_tx, ifobj_rx);
}
@@ -458,11 +494,6 @@ static void test_spec_reset(struct test_spec *test)
__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
}
-static void test_spec_set_name(struct test_spec *test, const char *name)
-{
- strncpy(test->name, name, MAX_TEST_NAME_SIZE);
-}
-
static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
struct bpf_map *xskmap_tx)
@@ -747,6 +778,9 @@ static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, s
len = 0;
}
+ print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n",
+ pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb);
+
if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
pkt_stream->max_pkt_len = pkt->len;
pkt_nb++;
@@ -777,7 +811,7 @@ static void pkt_print_data(u32 *data, u32 cnt)
seqnum = ntohl(*data) & 0xffff;
pkt_nb = ntohl(*data) >> 16;
- fprintf(stdout, "%u:%u ", pkt_nb, seqnum);
+ ksft_print_msg("%u:%u ", pkt_nb, seqnum);
data++;
}
}
@@ -789,13 +823,13 @@ static void pkt_dump(void *pkt, u32 len, bool eth_header)
if (eth_header) {
/*extract L2 frame */
- fprintf(stdout, "DEBUG>> L2: dst mac: ");
+ ksft_print_msg("DEBUG>> L2: dst mac: ");
for (i = 0; i < ETH_ALEN; i++)
- fprintf(stdout, "%02X", ethhdr->h_dest[i]);
+ ksft_print_msg("%02X", ethhdr->h_dest[i]);
- fprintf(stdout, "\nDEBUG>> L2: src mac: ");
+ ksft_print_msg("\nDEBUG>> L2: src mac: ");
for (i = 0; i < ETH_ALEN; i++)
- fprintf(stdout, "%02X", ethhdr->h_source[i]);
+ ksft_print_msg("%02X", ethhdr->h_source[i]);
data = pkt + PKT_HDR_SIZE;
} else {
@@ -803,15 +837,15 @@ static void pkt_dump(void *pkt, u32 len, bool eth_header)
}
/*extract L5 frame */
- fprintf(stdout, "\nDEBUG>> L5: seqnum: ");
+ ksft_print_msg("\nDEBUG>> L5: seqnum: ");
pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
- fprintf(stdout, "....");
+ ksft_print_msg("....");
if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
- fprintf(stdout, "\n.... ");
+ ksft_print_msg("\n.... ");
pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
PKT_DUMP_NB_TO_PRINT);
}
- fprintf(stdout, "\n---------------------------------------\n");
+ ksft_print_msg("\n---------------------------------------\n");
}
static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
@@ -916,36 +950,42 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
return true;
}
-static void kick_tx(struct xsk_socket_info *xsk)
+static int kick_tx(struct xsk_socket_info *xsk)
{
int ret;
ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
if (ret >= 0)
- return;
+ return TEST_PASS;
if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
usleep(100);
- return;
+ return TEST_PASS;
}
- exit_with_error(errno);
+ return TEST_FAILURE;
}
-static void kick_rx(struct xsk_socket_info *xsk)
+static int kick_rx(struct xsk_socket_info *xsk)
{
int ret;
ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
if (ret < 0)
- exit_with_error(errno);
+ return TEST_FAILURE;
+
+ return TEST_PASS;
}
static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
{
unsigned int rcvd;
u32 idx;
+ int ret;
- if (xsk_ring_prod__needs_wakeup(&xsk->tx))
- kick_tx(xsk);
+ if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
+ ret = kick_tx(xsk);
+ if (ret)
+ return TEST_FAILURE;
+ }
rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
if (rcvd) {
@@ -993,11 +1033,14 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
return TEST_FAILURE;
}
- kick_rx(xsk);
+ ret = kick_rx(xsk);
+ if (ret)
+ return TEST_FAILURE;
+
if (ifobj->use_poll) {
ret = poll(fds, 1, POLL_TMOUT);
if (ret < 0)
- exit_with_error(errno);
+ return TEST_FAILURE;
if (!ret) {
if (!is_umem_valid(test->ifobj_tx))
@@ -1018,12 +1061,10 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
if (ifobj->use_fill_ring) {
ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
while (ret != rcvd) {
- if (ret < 0)
- exit_with_error(-ret);
if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
ret = poll(fds, 1, POLL_TMOUT);
if (ret < 0)
- exit_with_error(errno);
+ return TEST_FAILURE;
}
ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
}
@@ -1042,6 +1083,9 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
return TEST_FAILURE;
}
+ print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
+ addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);
+
if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
!is_offset_correct(umem, pkt, addr) ||
(ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr)))
@@ -1104,7 +1148,9 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
/* pkts_in_flight might be negative if many invalid packets are sent */
if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) {
- kick_tx(xsk);
+ ret = kick_tx(xsk);
+ if (ret)
+ return TEST_FAILURE;
return TEST_CONTINUE;
}
@@ -1165,6 +1211,9 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
bytes_written);
bytes_written += tx_desc->len;
+ print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n",
+ tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb);
+
if (nb_frags_left) {
i++;
if (pkt_stream->verbatim)
@@ -1207,10 +1256,29 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
return TEST_CONTINUE;
}
-static void wait_for_tx_completion(struct xsk_socket_info *xsk)
+static int wait_for_tx_completion(struct xsk_socket_info *xsk)
{
- while (xsk->outstanding_tx)
+ struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
+ int ret;
+
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ exit_with_error(errno);
+ timeradd(&tv_now, &tv_timeout, &tv_end);
+
+ while (xsk->outstanding_tx) {
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ exit_with_error(errno);
+ if (timercmp(&tv_now, &tv_end, >)) {
+ ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__);
+ return TEST_FAILURE;
+ }
+
complete_pkts(xsk, BATCH_SIZE);
+ }
+
+ return TEST_PASS;
}
static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
@@ -1233,8 +1301,7 @@ static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
return ret;
}
- wait_for_tx_completion(ifobject->xsk);
- return TEST_PASS;
+ return wait_for_tx_completion(ifobject->xsk);
}
static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
@@ -1266,7 +1333,9 @@ static int validate_rx_dropped(struct ifobject *ifobject)
struct xdp_statistics stats;
int err;
- kick_rx(ifobject->xsk);
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
err = get_xsk_stats(xsk, &stats);
if (err)
@@ -1292,7 +1361,9 @@ static int validate_rx_full(struct ifobject *ifobject)
int err;
usleep(1000);
- kick_rx(ifobject->xsk);
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
err = get_xsk_stats(xsk, &stats);
if (err)
@@ -1311,7 +1382,9 @@ static int validate_fill_empty(struct ifobject *ifobject)
int err;
usleep(1000);
- kick_rx(ifobject->xsk);
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
err = get_xsk_stats(xsk, &stats);
if (err)
@@ -1475,8 +1548,6 @@ static void *worker_testapp_validate_tx(void *arg)
thread_common_ops_tx(test, ifobject);
}
- print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
- ifobject->ifname);
err = send_pkts(test, ifobject);
if (!err && ifobject->validation_func)
@@ -1500,7 +1571,8 @@ static void *worker_testapp_validate_rx(void *arg)
xsk_clear_xskmap(ifobject->xskmap);
err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
if (err) {
- printf("Error: Failed to update xskmap, error %s\n", strerror(-err));
+ ksft_print_msg("Error: Failed to update xskmap, error %s\n",
+ strerror(-err));
exit_with_error(-err);
}
}
@@ -1564,7 +1636,7 @@ static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_pro
xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
if (err) {
- printf("Error attaching XDP program\n");
+ ksft_print_msg("Error attaching XDP program\n");
exit_with_error(-err);
}
@@ -1682,7 +1754,6 @@ static int testapp_teardown(struct test_spec *test)
{
int i;
- test_spec_set_name(test, "TEARDOWN");
for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
if (testapp_validate_traffic(test))
return TEST_FAILURE;
@@ -1704,18 +1775,17 @@ static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
*ifobj2 = tmp_ifobj;
}
-static int testapp_bidi(struct test_spec *test)
+static int testapp_bidirectional(struct test_spec *test)
{
int res;
- test_spec_set_name(test, "BIDIRECTIONAL");
test->ifobj_tx->rx_on = true;
test->ifobj_rx->tx_on = true;
test->total_steps = 2;
if (testapp_validate_traffic(test))
return TEST_FAILURE;
- print_verbose("Switching Tx/Rx vectors\n");
+ print_verbose("Switching Tx/Rx direction\n");
swap_directions(&test->ifobj_rx, &test->ifobj_tx);
res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
@@ -1723,7 +1793,7 @@ static int testapp_bidi(struct test_spec *test)
return res;
}
-static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
+static int swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
{
int ret;
@@ -1734,31 +1804,31 @@ static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj
ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk);
if (ret)
- exit_with_error(errno);
+ return TEST_FAILURE;
+
+ return TEST_PASS;
}
-static int testapp_bpf_res(struct test_spec *test)
+static int testapp_xdp_prog_cleanup(struct test_spec *test)
{
- test_spec_set_name(test, "BPF_RES");
test->total_steps = 2;
test->nb_sockets = 2;
if (testapp_validate_traffic(test))
return TEST_FAILURE;
- swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
+ if (swap_xsk_resources(test->ifobj_tx, test->ifobj_rx))
+ return TEST_FAILURE;
return testapp_validate_traffic(test);
}
static int testapp_headroom(struct test_spec *test)
{
- test_spec_set_name(test, "UMEM_HEADROOM");
test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
return testapp_validate_traffic(test);
}
static int testapp_stats_rx_dropped(struct test_spec *test)
{
- test_spec_set_name(test, "STAT_RX_DROPPED");
if (test->mode == TEST_MODE_ZC) {
ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
return TEST_SKIP;
@@ -1774,7 +1844,6 @@ static int testapp_stats_rx_dropped(struct test_spec *test)
static int testapp_stats_tx_invalid_descs(struct test_spec *test)
{
- test_spec_set_name(test, "STAT_TX_INVALID");
pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
test->ifobj_tx->validation_func = validate_tx_invalid_descs;
return testapp_validate_traffic(test);
@@ -1782,7 +1851,6 @@ static int testapp_stats_tx_invalid_descs(struct test_spec *test)
static int testapp_stats_rx_full(struct test_spec *test)
{
- test_spec_set_name(test, "STAT_RX_FULL");
pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
@@ -1795,7 +1863,6 @@ static int testapp_stats_rx_full(struct test_spec *test)
static int testapp_stats_fill_empty(struct test_spec *test)
{
- test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
@@ -1805,9 +1872,8 @@ static int testapp_stats_fill_empty(struct test_spec *test)
return testapp_validate_traffic(test);
}
-static int testapp_unaligned(struct test_spec *test)
+static int testapp_send_receive_unaligned(struct test_spec *test)
{
- test_spec_set_name(test, "UNALIGNED_MODE");
test->ifobj_tx->umem->unaligned_mode = true;
test->ifobj_rx->umem->unaligned_mode = true;
/* Let half of the packets straddle a 4K buffer boundary */
@@ -1816,9 +1882,8 @@ static int testapp_unaligned(struct test_spec *test)
return testapp_validate_traffic(test);
}
-static int testapp_unaligned_mb(struct test_spec *test)
+static int testapp_send_receive_unaligned_mb(struct test_spec *test)
{
- test_spec_set_name(test, "UNALIGNED_MODE_9K");
test->mtu = MAX_ETH_JUMBO_SIZE;
test->ifobj_tx->umem->unaligned_mode = true;
test->ifobj_rx->umem->unaligned_mode = true;
@@ -1834,9 +1899,8 @@ static int testapp_single_pkt(struct test_spec *test)
return testapp_validate_traffic(test);
}
-static int testapp_multi_buffer(struct test_spec *test)
+static int testapp_send_receive_mb(struct test_spec *test)
{
- test_spec_set_name(test, "RUN_TO_COMPLETION_9K_PACKETS");
test->mtu = MAX_ETH_JUMBO_SIZE;
pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
@@ -1933,7 +1997,6 @@ static int testapp_xdp_drop(struct test_spec *test)
struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
- test_spec_set_name(test, "XDP_DROP_HALF");
test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
skel_rx->maps.xsk, skel_tx->maps.xsk);
@@ -1941,7 +2004,7 @@ static int testapp_xdp_drop(struct test_spec *test)
return testapp_validate_traffic(test);
}
-static int testapp_xdp_metadata_count(struct test_spec *test)
+static int testapp_xdp_metadata_copy(struct test_spec *test)
{
struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
@@ -1955,19 +2018,21 @@ static int testapp_xdp_metadata_count(struct test_spec *test)
test->ifobj_rx->use_metadata = true;
data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
- if (!data_map || !bpf_map__is_internal(data_map))
- exit_with_error(ENOMEM);
+ if (!data_map || !bpf_map__is_internal(data_map)) {
+ ksft_print_msg("Error: could not find bss section of XDP program\n");
+ return TEST_FAILURE;
+ }
- if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY))
- exit_with_error(errno);
+ if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) {
+ ksft_print_msg("Error: could not update count element\n");
+ return TEST_FAILURE;
+ }
return testapp_validate_traffic(test);
}
static int testapp_poll_txq_tmout(struct test_spec *test)
{
- test_spec_set_name(test, "POLL_TXQ_FULL");
-
test->ifobj_tx->use_poll = true;
/* create invalid frame by set umem frame_size and pkt length equal to 2048 */
test->ifobj_tx->umem->frame_size = 2048;
@@ -1977,7 +2042,6 @@ static int testapp_poll_txq_tmout(struct test_spec *test)
static int testapp_poll_rxq_tmout(struct test_spec *test)
{
- test_spec_set_name(test, "POLL_RXQ_EMPTY");
test->ifobj_rx->use_poll = true;
return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
}
@@ -1987,7 +2051,6 @@ static int testapp_too_many_frags(struct test_spec *test)
struct pkt pkts[2 * XSK_DESC__MAX_SKB_FRAGS + 2] = {};
u32 max_frags, i;
- test_spec_set_name(test, "TOO_MANY_FRAGS");
if (test->mode == TEST_MODE_ZC)
max_frags = test->ifobj_tx->xdp_zc_max_segs;
else
@@ -2067,7 +2130,7 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *
err = xsk_load_xdp_programs(ifobj);
if (err) {
- printf("Error loading XDP program\n");
+ ksft_print_msg("Error loading XDP program\n");
exit_with_error(err);
}
@@ -2091,138 +2154,98 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *
}
}
-static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
-{
- int ret = TEST_SKIP;
-
- switch (type) {
- case TEST_TYPE_STATS_RX_DROPPED:
- ret = testapp_stats_rx_dropped(test);
- break;
- case TEST_TYPE_STATS_TX_INVALID_DESCS:
- ret = testapp_stats_tx_invalid_descs(test);
- break;
- case TEST_TYPE_STATS_RX_FULL:
- ret = testapp_stats_rx_full(test);
- break;
- case TEST_TYPE_STATS_FILL_EMPTY:
- ret = testapp_stats_fill_empty(test);
- break;
- case TEST_TYPE_TEARDOWN:
- ret = testapp_teardown(test);
- break;
- case TEST_TYPE_BIDI:
- ret = testapp_bidi(test);
- break;
- case TEST_TYPE_BPF_RES:
- ret = testapp_bpf_res(test);
- break;
- case TEST_TYPE_RUN_TO_COMPLETION:
- test_spec_set_name(test, "RUN_TO_COMPLETION");
- ret = testapp_validate_traffic(test);
- break;
- case TEST_TYPE_RUN_TO_COMPLETION_MB:
- ret = testapp_multi_buffer(test);
- break;
- case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
- test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
- ret = testapp_single_pkt(test);
- break;
- case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
- test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
- test->ifobj_tx->umem->frame_size = 2048;
- test->ifobj_rx->umem->frame_size = 2048;
- pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
- ret = testapp_validate_traffic(test);
- break;
- case TEST_TYPE_RX_POLL:
- test->ifobj_rx->use_poll = true;
- test_spec_set_name(test, "POLL_RX");
- ret = testapp_validate_traffic(test);
- break;
- case TEST_TYPE_TX_POLL:
- test->ifobj_tx->use_poll = true;
- test_spec_set_name(test, "POLL_TX");
- ret = testapp_validate_traffic(test);
- break;
- case TEST_TYPE_POLL_TXQ_TMOUT:
- ret = testapp_poll_txq_tmout(test);
- break;
- case TEST_TYPE_POLL_RXQ_TMOUT:
- ret = testapp_poll_rxq_tmout(test);
- break;
- case TEST_TYPE_ALIGNED_INV_DESC:
- test_spec_set_name(test, "ALIGNED_INV_DESC");
- ret = testapp_invalid_desc(test);
- break;
- case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
- test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
- test->ifobj_tx->umem->frame_size = 2048;
- test->ifobj_rx->umem->frame_size = 2048;
- ret = testapp_invalid_desc(test);
- break;
- case TEST_TYPE_UNALIGNED_INV_DESC:
- test_spec_set_name(test, "UNALIGNED_INV_DESC");
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
- ret = testapp_invalid_desc(test);
- break;
- case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: {
- u64 page_size, umem_size;
-
- test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE");
- /* Odd frame size so the UMEM doesn't end near a page boundary. */
- test->ifobj_tx->umem->frame_size = 4001;
- test->ifobj_rx->umem->frame_size = 4001;
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
- /* This test exists to test descriptors that staddle the end of
- * the UMEM but not a page.
- */
- page_size = sysconf(_SC_PAGESIZE);
- umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
- assert(umem_size % page_size > MIN_PKT_SIZE);
- assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
- ret = testapp_invalid_desc(test);
- break;
- }
- case TEST_TYPE_ALIGNED_INV_DESC_MB:
- test_spec_set_name(test, "ALIGNED_INV_DESC_MULTI_BUFF");
- ret = testapp_invalid_desc_mb(test);
- break;
- case TEST_TYPE_UNALIGNED_INV_DESC_MB:
- test_spec_set_name(test, "UNALIGNED_INV_DESC_MULTI_BUFF");
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
- ret = testapp_invalid_desc_mb(test);
- break;
- case TEST_TYPE_UNALIGNED:
- ret = testapp_unaligned(test);
- break;
- case TEST_TYPE_UNALIGNED_MB:
- ret = testapp_unaligned_mb(test);
- break;
- case TEST_TYPE_HEADROOM:
- ret = testapp_headroom(test);
- break;
- case TEST_TYPE_XDP_DROP_HALF:
- ret = testapp_xdp_drop(test);
- break;
- case TEST_TYPE_XDP_METADATA_COUNT:
- test_spec_set_name(test, "XDP_METADATA_COUNT");
- ret = testapp_xdp_metadata_count(test);
- break;
- case TEST_TYPE_XDP_METADATA_COUNT_MB:
- test_spec_set_name(test, "XDP_METADATA_COUNT_MULTI_BUFF");
- test->mtu = MAX_ETH_JUMBO_SIZE;
- ret = testapp_xdp_metadata_count(test);
- break;
- case TEST_TYPE_TOO_MANY_FRAGS:
- ret = testapp_too_many_frags(test);
- break;
- default:
- break;
- }
+static int testapp_send_receive(struct test_spec *test)
+{
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_send_receive_2k_frame(struct test_spec *test)
+{
+ test->ifobj_tx->umem->frame_size = 2048;
+ test->ifobj_rx->umem->frame_size = 2048;
+ pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_poll_rx(struct test_spec *test)
+{
+ test->ifobj_rx->use_poll = true;
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_poll_tx(struct test_spec *test)
+{
+ test->ifobj_tx->use_poll = true;
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_aligned_inv_desc(struct test_spec *test)
+{
+ return testapp_invalid_desc(test);
+}
+
+static int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
+{
+ test->ifobj_tx->umem->frame_size = 2048;
+ test->ifobj_rx->umem->frame_size = 2048;
+ return testapp_invalid_desc(test);
+}
+
+static int testapp_unaligned_inv_desc(struct test_spec *test)
+{
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ return testapp_invalid_desc(test);
+}
+
+static int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
+{
+ u64 page_size, umem_size;
+
+ /* Odd frame size so the UMEM doesn't end near a page boundary. */
+ test->ifobj_tx->umem->frame_size = 4001;
+ test->ifobj_rx->umem->frame_size = 4001;
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ /* This test exists to test descriptors that straddle the end of
+ * the UMEM but not a page.
+ */
+ page_size = sysconf(_SC_PAGESIZE);
+ umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
+ assert(umem_size % page_size > MIN_PKT_SIZE);
+ assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
+
+ return testapp_invalid_desc(test);
+}
+
+static int testapp_aligned_inv_desc_mb(struct test_spec *test)
+{
+ return testapp_invalid_desc_mb(test);
+}
+
+static int testapp_unaligned_inv_desc_mb(struct test_spec *test)
+{
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ return testapp_invalid_desc_mb(test);
+}
+
+static int testapp_xdp_metadata(struct test_spec *test)
+{
+ return testapp_xdp_metadata_copy(test);
+}
+
+static int testapp_xdp_metadata_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ return testapp_xdp_metadata_copy(test);
+}
+
+static void run_pkt_test(struct test_spec *test)
+{
+ int ret;
+
+ ret = test->test_func(test);
if (ret == TEST_PASS)
ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
@@ -2290,13 +2313,55 @@ static bool is_xdp_supported(int ifindex)
return true;
}
+static const struct test_spec tests[] = {
+ {.name = "SEND_RECEIVE", .test_func = testapp_send_receive},
+ {.name = "SEND_RECEIVE_2K_FRAME", .test_func = testapp_send_receive_2k_frame},
+ {.name = "SEND_RECEIVE_SINGLE_PKT", .test_func = testapp_single_pkt},
+ {.name = "POLL_RX", .test_func = testapp_poll_rx},
+ {.name = "POLL_TX", .test_func = testapp_poll_tx},
+ {.name = "POLL_RXQ_FULL", .test_func = testapp_poll_rxq_tmout},
+ {.name = "POLL_TXQ_FULL", .test_func = testapp_poll_txq_tmout},
+ {.name = "SEND_RECEIVE_UNALIGNED", .test_func = testapp_send_receive_unaligned},
+ {.name = "ALIGNED_INV_DESC", .test_func = testapp_aligned_inv_desc},
+ {.name = "ALIGNED_INV_DESC_2K_FRAME_SIZE", .test_func = testapp_aligned_inv_desc_2k_frame},
+ {.name = "UNALIGNED_INV_DESC", .test_func = testapp_unaligned_inv_desc},
+ {.name = "UNALIGNED_INV_DESC_4001_FRAME_SIZE",
+ .test_func = testapp_unaligned_inv_desc_4001_frame},
+ {.name = "UMEM_HEADROOM", .test_func = testapp_headroom},
+ {.name = "TEARDOWN", .test_func = testapp_teardown},
+ {.name = "BIDIRECTIONAL", .test_func = testapp_bidirectional},
+ {.name = "STAT_RX_DROPPED", .test_func = testapp_stats_rx_dropped},
+ {.name = "STAT_TX_INVALID", .test_func = testapp_stats_tx_invalid_descs},
+ {.name = "STAT_RX_FULL", .test_func = testapp_stats_rx_full},
+ {.name = "STAT_FILL_EMPTY", .test_func = testapp_stats_fill_empty},
+ {.name = "XDP_PROG_CLEANUP", .test_func = testapp_xdp_prog_cleanup},
+ {.name = "XDP_DROP_HALF", .test_func = testapp_xdp_drop},
+ {.name = "XDP_METADATA_COPY", .test_func = testapp_xdp_metadata},
+ {.name = "XDP_METADATA_COPY_MULTI_BUFF", .test_func = testapp_xdp_metadata_mb},
+ {.name = "SEND_RECEIVE_9K_PACKETS", .test_func = testapp_send_receive_mb},
+ {.name = "SEND_RECEIVE_UNALIGNED_9K_PACKETS",
+ .test_func = testapp_send_receive_unaligned_mb},
+ {.name = "ALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_aligned_inv_desc_mb},
+ {.name = "UNALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_unaligned_inv_desc_mb},
+ {.name = "TOO_MANY_FRAGS", .test_func = testapp_too_many_frags},
+};
+
+static void print_tests(void)
+{
+ u32 i;
+
+ printf("Tests:\n");
+ for (i = 0; i < ARRAY_SIZE(tests); i++)
+ printf("%u: %s\n", i, tests[i].name);
+}
+
int main(int argc, char **argv)
{
struct pkt_stream *rx_pkt_stream_default;
struct pkt_stream *tx_pkt_stream_default;
struct ifobject *ifobj_tx, *ifobj_rx;
+ u32 i, j, failed_tests = 0, nb_tests;
int modes = TEST_MODE_SKB + 1;
- u32 i, j, failed_tests = 0;
struct test_spec test;
bool shared_netdev;
@@ -2314,14 +2379,21 @@ int main(int argc, char **argv)
parse_command_line(ifobj_tx, ifobj_rx, argc, argv);
+ if (opt_print_tests) {
+ print_tests();
+ ksft_exit_xpass();
+ }
+ if (opt_run_test != RUN_ALL_TESTS && opt_run_test >= ARRAY_SIZE(tests)) {
+ ksft_print_msg("Error: test %u does not exist.\n", opt_run_test);
+ ksft_exit_xfail();
+ }
+
shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex);
ifobj_tx->shared_umem = shared_netdev;
ifobj_rx->shared_umem = shared_netdev;
- if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
- usage(basename(argv[0]));
- ksft_exit_xfail();
- }
+ if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx))
+ print_usage(argv);
if (is_xdp_supported(ifobj_tx->ifindex)) {
modes++;
@@ -2332,7 +2404,7 @@ int main(int argc, char **argv)
init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx);
init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx);
- test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
+ test_spec_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]);
tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
if (!tx_pkt_stream_default || !rx_pkt_stream_default)
@@ -2340,12 +2412,35 @@ int main(int argc, char **argv)
test.tx_pkt_stream_default = tx_pkt_stream_default;
test.rx_pkt_stream_default = rx_pkt_stream_default;
- ksft_set_plan(modes * TEST_TYPE_MAX);
+ if (opt_run_test == RUN_ALL_TESTS)
+ nb_tests = ARRAY_SIZE(tests);
+ else
+ nb_tests = 1;
+ if (opt_mode == TEST_MODE_ALL) {
+ ksft_set_plan(modes * nb_tests);
+ } else {
+ if (opt_mode == TEST_MODE_DRV && modes <= TEST_MODE_DRV) {
+ ksft_print_msg("Error: XDP_DRV mode not supported.\n");
+ ksft_exit_xfail();
+ }
+ if (opt_mode == TEST_MODE_ZC && modes <= TEST_MODE_ZC) {
+ ksft_print_msg("Error: zero-copy mode not supported.\n");
+ ksft_exit_xfail();
+ }
+
+ ksft_set_plan(nb_tests);
+ }
for (i = 0; i < modes; i++) {
- for (j = 0; j < TEST_TYPE_MAX; j++) {
- test_spec_init(&test, ifobj_tx, ifobj_rx, i);
- run_pkt_test(&test, i, j);
+ if (opt_mode != TEST_MODE_ALL && i != opt_mode)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(tests); j++) {
+ if (opt_run_test != RUN_ALL_TESTS && j != opt_run_test)
+ continue;
+
+ test_spec_init(&test, ifobj_tx, ifobj_rx, i, &tests[j]);
+ run_pkt_test(&test);
usleep(USLEEP_MAX);
if (test.fail)
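With the switch-based dispatcher replaced by the tests[] table above, adding a test to xskxceiver is now one function plus one table entry. A hypothetical example:

static int testapp_send_receive_4k_frame(struct test_spec *test)
{
	/* Hypothetical test: exercise a 4 KiB frame size. */
	test->ifobj_tx->umem->frame_size = 4096;
	test->ifobj_rx->umem->frame_size = 4096;
	return testapp_validate_traffic(test);
}

/* ...and in tests[]: */
/* {.name = "SEND_RECEIVE_4K_FRAME", .test_func = testapp_send_receive_4k_frame}, */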
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 233b66cef64a..8015aeea839d 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -5,6 +5,8 @@
#ifndef XSKXCEIVER_H_
#define XSKXCEIVER_H_
+#include <limits.h>
+
#include "xsk_xdp_progs.skel.h"
#ifndef SOL_XDP
@@ -34,7 +36,7 @@
#define MAX_INTERFACES 2
#define MAX_INTERFACE_NAME_CHARS 16
#define MAX_SOCKETS 2
-#define MAX_TEST_NAME_SIZE 32
+#define MAX_TEST_NAME_SIZE 48
#define MAX_TEARDOWN_ITER 10
#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2) /* Just to align the data in the packet */
#define MIN_PKT_SIZE 64
@@ -56,6 +58,7 @@
#define XSK_DESC__MAX_SKB_FRAGS 18
#define HUGEPAGE_SIZE (2 * 1024 * 1024)
#define PKT_DUMP_NB_TO_PRINT 16
+#define RUN_ALL_TESTS UINT_MAX
#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
@@ -63,43 +66,9 @@ enum test_mode {
TEST_MODE_SKB,
TEST_MODE_DRV,
TEST_MODE_ZC,
- TEST_MODE_MAX
-};
-
-enum test_type {
- TEST_TYPE_RUN_TO_COMPLETION,
- TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME,
- TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT,
- TEST_TYPE_RX_POLL,
- TEST_TYPE_TX_POLL,
- TEST_TYPE_POLL_RXQ_TMOUT,
- TEST_TYPE_POLL_TXQ_TMOUT,
- TEST_TYPE_UNALIGNED,
- TEST_TYPE_ALIGNED_INV_DESC,
- TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME,
- TEST_TYPE_UNALIGNED_INV_DESC,
- TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME,
- TEST_TYPE_HEADROOM,
- TEST_TYPE_TEARDOWN,
- TEST_TYPE_BIDI,
- TEST_TYPE_STATS_RX_DROPPED,
- TEST_TYPE_STATS_TX_INVALID_DESCS,
- TEST_TYPE_STATS_RX_FULL,
- TEST_TYPE_STATS_FILL_EMPTY,
- TEST_TYPE_BPF_RES,
- TEST_TYPE_XDP_DROP_HALF,
- TEST_TYPE_XDP_METADATA_COUNT,
- TEST_TYPE_XDP_METADATA_COUNT_MB,
- TEST_TYPE_RUN_TO_COMPLETION_MB,
- TEST_TYPE_UNALIGNED_MB,
- TEST_TYPE_ALIGNED_INV_DESC_MB,
- TEST_TYPE_UNALIGNED_INV_DESC_MB,
- TEST_TYPE_TOO_MANY_FRAGS,
- TEST_TYPE_MAX
+ TEST_MODE_ALL
};
-static bool opt_verbose;
-
struct xsk_umem_info {
struct xsk_ring_prod fq;
struct xsk_ring_cons cq;
@@ -139,8 +108,10 @@ struct pkt_stream {
};
struct ifobject;
+struct test_spec;
typedef int (*validation_func_t)(struct ifobject *ifobj);
typedef void *(*thread_func_t)(void *arg);
+typedef int (*test_func_t)(struct test_spec *test);
struct ifobject {
char ifname[MAX_INTERFACE_NAME_CHARS];
@@ -182,6 +153,7 @@ struct test_spec {
struct bpf_program *xdp_prog_tx;
struct bpf_map *xskmap_rx;
struct bpf_map *xskmap_tx;
+ test_func_t test_func;
int mtu;
u16 total_steps;
u16 current_step;
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index b1fc8afd072d..61a2a1988ce6 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -716,7 +716,7 @@ run_test_transparent()
# the required infrastructure in MPTCP sockopt code. To support TOS, the
# following function has been exported (T). Not great but better than
# checking for a specific kernel version.
- if ! mptcp_lib_kallsyms_has "T __ip_sock_set_tos$"; then
+ if ! mptcp_lib_kallsyms_has "T ip_sock_set_tos$"; then
echo "INFO: ${msg} not supported by the kernel: SKIP"
mptcp_lib_result_skip "${TEST_GROUP}"
return
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 488f4964365e..5f2b3f6c0d74 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -31,6 +31,9 @@ ALL_TESTS="
"
devdummy="test-dummy0"
+VERBOSE=0
+PAUSE=no
+PAUSE_ON_FAIL=no
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
@@ -51,35 +54,102 @@ check_fail()
fi
}
+run_cmd_common()
+{
+ local cmd="$*"
+ local out
+ if [ "$VERBOSE" = "1" ]; then
+ echo "COMMAND: ${cmd}"
+ fi
+ out=$($cmd 2>&1)
+ rc=$?
+ if [ "$VERBOSE" = "1" -a -n "$out" ]; then
+ echo " $out"
+ fi
+ return $rc
+}
+
+run_cmd() {
+ run_cmd_common "$@"
+ rc=$?
+ check_err $rc
+ return $rc
+}
+run_cmd_fail()
+{
+ run_cmd_common "$@"
+ rc=$?
+ check_fail $rc
+ return $rc
+}
+
+run_cmd_grep_common()
+{
+ local find="$1"; shift
+ local cmd="$*"
+ local out
+ if [ "$VERBOSE" = "1" ]; then
+ echo "COMMAND: ${cmd} 2>&1 | grep -q '${find}'"
+ fi
+ out=$($cmd 2>&1 | grep -q "${find}" 2>&1)
+ return $?
+}
+
+run_cmd_grep() {
+ run_cmd_grep_common "$@"
+ rc=$?
+ check_err $rc
+ return $rc
+}
+
+run_cmd_grep_fail()
+{
+ run_cmd_grep_common "$@"
+ rc=$?
+ check_fail $rc
+ return $rc
+}
+
+end_test()
+{
+ echo "$*"
+ [ "${VERBOSE}" = "1" ] && echo
+
+ if [[ $ret -ne 0 ]] && [[ "${PAUSE_ON_FAIL}" = "yes" ]]; then
+ echo "Hit enter to continue"
+ read a
+ fi
+
+ if [ "${PAUSE}" = "yes" ]; then
+ echo "Hit enter to continue"
+ read a
+ fi
+
+}
+
+
kci_add_dummy()
{
- ip link add name "$devdummy" type dummy
- check_err $?
- ip link set "$devdummy" up
- check_err $?
+ run_cmd ip link add name "$devdummy" type dummy
+ run_cmd ip link set "$devdummy" up
}
kci_del_dummy()
{
- ip link del dev "$devdummy"
- check_err $?
+ run_cmd ip link del dev "$devdummy"
}
kci_test_netconf()
{
dev="$1"
r=$ret
-
- ip netconf show dev "$dev" > /dev/null
- check_err $?
-
+ run_cmd ip netconf show dev "$dev"
for f in 4 6; do
- ip -$f netconf show dev "$dev" > /dev/null
- check_err $?
+ run_cmd ip -$f netconf show dev "$dev"
done
if [ $ret -ne 0 ] ;then
- echo "FAIL: ip netconf show $dev"
+ end_test "FAIL: ip netconf show $dev"
test $r -eq 0 && ret=0
return 1
fi
@@ -92,43 +162,27 @@ kci_test_bridge()
vlandev="testbr-vlan1"
local ret=0
- ip link add name "$devbr" type bridge
- check_err $?
-
- ip link set dev "$devdummy" master "$devbr"
- check_err $?
-
- ip link set "$devbr" up
- check_err $?
-
- ip link add link "$devbr" name "$vlandev" type vlan id 1
- check_err $?
- ip addr add dev "$vlandev" 10.200.7.23/30
- check_err $?
- ip -6 addr add dev "$vlandev" dead:42::1234/64
- check_err $?
- ip -d link > /dev/null
- check_err $?
- ip r s t all > /dev/null
- check_err $?
+ run_cmd ip link add name "$devbr" type bridge
+ run_cmd ip link set dev "$devdummy" master "$devbr"
+ run_cmd ip link set "$devbr" up
+ run_cmd ip link add link "$devbr" name "$vlandev" type vlan id 1
+ run_cmd ip addr add dev "$vlandev" 10.200.7.23/30
+ run_cmd ip -6 addr add dev "$vlandev" dead:42::1234/64
+ run_cmd ip -d link
+ run_cmd ip r s t all
for name in "$devbr" "$vlandev" "$devdummy" ; do
kci_test_netconf "$name"
done
-
- ip -6 addr del dev "$vlandev" dead:42::1234/64
- check_err $?
-
- ip link del dev "$vlandev"
- check_err $?
- ip link del dev "$devbr"
- check_err $?
+ run_cmd ip -6 addr del dev "$vlandev" dead:42::1234/64
+ run_cmd ip link del dev "$vlandev"
+ run_cmd ip link del dev "$devbr"
if [ $ret -ne 0 ];then
- echo "FAIL: bridge setup"
+ end_test "FAIL: bridge setup"
return 1
fi
- echo "PASS: bridge setup"
+ end_test "PASS: bridge setup"
}
@@ -139,34 +193,23 @@ kci_test_gre()
loc=10.0.0.1
local ret=0
- ip tunnel add $gredev mode gre remote $rem local $loc ttl 1
- check_err $?
- ip link set $gredev up
- check_err $?
- ip addr add 10.23.7.10 dev $gredev
- check_err $?
- ip route add 10.23.8.0/30 dev $gredev
- check_err $?
- ip addr add dev "$devdummy" 10.23.7.11/24
- check_err $?
- ip link > /dev/null
- check_err $?
- ip addr > /dev/null
- check_err $?
+ run_cmd ip tunnel add $gredev mode gre remote $rem local $loc ttl 1
+ run_cmd ip link set $gredev up
+ run_cmd ip addr add 10.23.7.10 dev $gredev
+ run_cmd ip route add 10.23.8.0/30 dev $gredev
+ run_cmd ip addr add dev "$devdummy" 10.23.7.11/24
+ run_cmd ip link
+ run_cmd ip addr
kci_test_netconf "$gredev"
-
- ip addr del dev "$devdummy" 10.23.7.11/24
- check_err $?
-
- ip link del $gredev
- check_err $?
+ run_cmd ip addr del dev "$devdummy" 10.23.7.11/24
+ run_cmd ip link del $gredev
if [ $ret -ne 0 ];then
- echo "FAIL: gre tunnel endpoint"
+ end_test "FAIL: gre tunnel endpoint"
return 1
fi
- echo "PASS: gre tunnel endpoint"
+ end_test "PASS: gre tunnel endpoint"
}
# tc uses rtnetlink too, for full tc testing
@@ -176,56 +219,40 @@ kci_test_tc()
dev=lo
local ret=0
- tc qdisc add dev "$dev" root handle 1: htb
- check_err $?
- tc class add dev "$dev" parent 1: classid 1:10 htb rate 1mbit
- check_err $?
- tc filter add dev "$dev" parent 1:0 prio 5 handle ffe: protocol ip u32 divisor 256
- check_err $?
- tc filter add dev "$dev" parent 1:0 prio 5 handle ffd: protocol ip u32 divisor 256
- check_err $?
- tc filter add dev "$dev" parent 1:0 prio 5 handle ffc: protocol ip u32 divisor 256
- check_err $?
- tc filter add dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:3 u32 ht ffe:2: match ip src 10.0.0.3 flowid 1:10
- check_err $?
- tc filter add dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:2 u32 ht ffe:2: match ip src 10.0.0.2 flowid 1:10
- check_err $?
- tc filter show dev "$dev" parent 1:0 > /dev/null
- check_err $?
- tc filter del dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:3 u32
- check_err $?
- tc filter show dev "$dev" parent 1:0 > /dev/null
- check_err $?
- tc qdisc del dev "$dev" root handle 1: htb
- check_err $?
+ run_cmd tc qdisc add dev "$dev" root handle 1: htb
+ run_cmd tc class add dev "$dev" parent 1: classid 1:10 htb rate 1mbit
+ run_cmd tc filter add dev "$dev" parent 1:0 prio 5 handle ffe: protocol ip u32 divisor 256
+ run_cmd tc filter add dev "$dev" parent 1:0 prio 5 handle ffd: protocol ip u32 divisor 256
+ run_cmd tc filter add dev "$dev" parent 1:0 prio 5 handle ffc: protocol ip u32 divisor 256
+ run_cmd tc filter add dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:3 u32 ht ffe:2: match ip src 10.0.0.3 flowid 1:10
+ run_cmd tc filter add dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:2 u32 ht ffe:2: match ip src 10.0.0.2 flowid 1:10
+ run_cmd tc filter show dev "$dev" parent 1:0
+ run_cmd tc filter del dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:3 u32
+ run_cmd tc filter show dev "$dev" parent 1:0
+ run_cmd tc qdisc del dev "$dev" root handle 1: htb
if [ $ret -ne 0 ];then
- echo "FAIL: tc htb hierarchy"
+ end_test "FAIL: tc htb hierarchy"
return 1
fi
- echo "PASS: tc htb hierarchy"
+ end_test "PASS: tc htb hierarchy"
}
kci_test_polrouting()
{
local ret=0
- ip rule add fwmark 1 lookup 100
- check_err $?
- ip route add local 0.0.0.0/0 dev lo table 100
- check_err $?
- ip r s t all > /dev/null
- check_err $?
- ip rule del fwmark 1 lookup 100
- check_err $?
- ip route del local 0.0.0.0/0 dev lo table 100
- check_err $?
+ run_cmd ip rule add fwmark 1 lookup 100
+ run_cmd ip route add local 0.0.0.0/0 dev lo table 100
+ run_cmd ip r s t all
+ run_cmd ip rule del fwmark 1 lookup 100
+ run_cmd ip route del local 0.0.0.0/0 dev lo table 100
if [ $ret -ne 0 ];then
- echo "FAIL: policy route test"
+ end_test "FAIL: policy route test"
return 1
fi
- echo "PASS: policy routing"
+ end_test "PASS: policy routing"
}
kci_test_route_get()
@@ -233,65 +260,51 @@ kci_test_route_get()
local hash_policy=$(sysctl -n net.ipv4.fib_multipath_hash_policy)
local ret=0
-
- ip route get 127.0.0.1 > /dev/null
- check_err $?
- ip route get 127.0.0.1 dev "$devdummy" > /dev/null
- check_err $?
- ip route get ::1 > /dev/null
- check_err $?
- ip route get fe80::1 dev "$devdummy" > /dev/null
- check_err $?
- ip route get 127.0.0.1 from 127.0.0.1 oif lo tos 0x10 mark 0x1 > /dev/null
- check_err $?
- ip route get ::1 from ::1 iif lo oif lo tos 0x10 mark 0x1 > /dev/null
- check_err $?
- ip addr add dev "$devdummy" 10.23.7.11/24
- check_err $?
- ip route get 10.23.7.11 from 10.23.7.12 iif "$devdummy" > /dev/null
- check_err $?
- ip route add 10.23.8.0/24 \
+ run_cmd ip route get 127.0.0.1
+ run_cmd ip route get 127.0.0.1 dev "$devdummy"
+ run_cmd ip route get ::1
+ run_cmd ip route get fe80::1 dev "$devdummy"
+ run_cmd ip route get 127.0.0.1 from 127.0.0.1 oif lo tos 0x10 mark 0x1
+ run_cmd ip route get ::1 from ::1 iif lo oif lo tos 0x10 mark 0x1
+ run_cmd ip addr add dev "$devdummy" 10.23.7.11/24
+ run_cmd ip route get 10.23.7.11 from 10.23.7.12 iif "$devdummy"
+ run_cmd ip route add 10.23.8.0/24 \
nexthop via 10.23.7.13 dev "$devdummy" \
nexthop via 10.23.7.14 dev "$devdummy"
- check_err $?
+
sysctl -wq net.ipv4.fib_multipath_hash_policy=0
- ip route get 10.23.8.11 > /dev/null
- check_err $?
+ run_cmd ip route get 10.23.8.11
sysctl -wq net.ipv4.fib_multipath_hash_policy=1
- ip route get 10.23.8.11 > /dev/null
- check_err $?
+ run_cmd ip route get 10.23.8.11
sysctl -wq net.ipv4.fib_multipath_hash_policy="$hash_policy"
- ip route del 10.23.8.0/24
- check_err $?
- ip addr del dev "$devdummy" 10.23.7.11/24
- check_err $?
+ run_cmd ip route del 10.23.8.0/24
+ run_cmd ip addr del dev "$devdummy" 10.23.7.11/24
+
if [ $ret -ne 0 ];then
- echo "FAIL: route get"
+ end_test "FAIL: route get"
return 1
fi
- echo "PASS: route get"
+ end_test "PASS: route get"
}
kci_test_addrlft()
{
for i in $(seq 10 100) ;do
lft=$(((RANDOM%3) + 1))
- ip addr add 10.23.11.$i/32 dev "$devdummy" preferred_lft $lft valid_lft $((lft+1))
- check_err $?
+ run_cmd ip addr add 10.23.11.$i/32 dev "$devdummy" preferred_lft $lft valid_lft $((lft+1))
done
sleep 5
-
- ip addr show dev "$devdummy" | grep "10.23.11."
+ run_cmd_grep "10.23.11." ip addr show dev "$devdummy"
if [ $? -eq 0 ]; then
- echo "FAIL: preferred_lft addresses remaining"
check_err 1
+ end_test "FAIL: preferred_lft addresses remaining"
return
fi
- echo "PASS: preferred_lft addresses have expired"
+ end_test "PASS: preferred_lft addresses have expired"
}
kci_test_promote_secondaries()
@@ -310,27 +323,17 @@ kci_test_promote_secondaries()
[ $promote -eq 0 ] && sysctl -q net.ipv4.conf.$devdummy.promote_secondaries=0
- echo "PASS: promote_secondaries complete"
+ end_test "PASS: promote_secondaries complete"
}
kci_test_addrlabel()
{
local ret=0
-
- ip addrlabel add prefix dead::/64 dev lo label 1
- check_err $?
-
- ip addrlabel list |grep -q "prefix dead::/64 dev lo label 1"
- check_err $?
-
- ip addrlabel del prefix dead::/64 dev lo label 1 2> /dev/null
- check_err $?
-
- ip addrlabel add prefix dead::/64 label 1 2> /dev/null
- check_err $?
-
- ip addrlabel del prefix dead::/64 label 1 2> /dev/null
- check_err $?
+ run_cmd ip addrlabel add prefix dead::/64 dev lo label 1
+ run_cmd_grep "prefix dead::/64 dev lo label 1" ip addrlabel list
+ run_cmd ip addrlabel del prefix dead::/64 dev lo label 1
+ run_cmd ip addrlabel add prefix dead::/64 label 1
+ run_cmd ip addrlabel del prefix dead::/64 label 1
# concurrent add/delete
for i in $(seq 1 1000); do
@@ -346,11 +349,11 @@ kci_test_addrlabel()
ip addrlabel del prefix 1c3::/64 label 12345 2>/dev/null
if [ $ret -ne 0 ];then
- echo "FAIL: ipv6 addrlabel"
+ end_test "FAIL: ipv6 addrlabel"
return 1
fi
- echo "PASS: ipv6 addrlabel"
+ end_test "PASS: ipv6 addrlabel"
}
kci_test_ifalias()
@@ -358,35 +361,28 @@ kci_test_ifalias()
local ret=0
namewant=$(uuidgen)
syspathname="/sys/class/net/$devdummy/ifalias"
-
- ip link set dev "$devdummy" alias "$namewant"
- check_err $?
+ run_cmd ip link set dev "$devdummy" alias "$namewant"
if [ $ret -ne 0 ]; then
- echo "FAIL: cannot set interface alias of $devdummy to $namewant"
+ end_test "FAIL: cannot set interface alias of $devdummy to $namewant"
return 1
fi
-
- ip link show "$devdummy" | grep -q "alias $namewant"
- check_err $?
+ run_cmd_grep "alias $namewant" ip link show "$devdummy"
if [ -r "$syspathname" ] ; then
read namehave < "$syspathname"
if [ "$namewant" != "$namehave" ]; then
- echo "FAIL: did set ifalias $namewant but got $namehave"
+ end_test "FAIL: did set ifalias $namewant but got $namehave"
return 1
fi
namewant=$(uuidgen)
echo "$namewant" > "$syspathname"
- ip link show "$devdummy" | grep -q "alias $namewant"
- check_err $?
+ run_cmd_grep "alias $namewant" ip link show "$devdummy"
# sysfs interface allows to delete alias again
echo "" > "$syspathname"
-
- ip link show "$devdummy" | grep -q "alias $namewant"
- check_fail $?
+ run_cmd_grep_fail "alias $namewant" ip link show "$devdummy"
for i in $(seq 1 100); do
uuidgen > "$syspathname" &
@@ -395,57 +391,48 @@ kci_test_ifalias()
wait
# re-add the alias -- kernel should free mem when dummy dev is removed
- ip link set dev "$devdummy" alias "$namewant"
- check_err $?
+ run_cmd ip link set dev "$devdummy" alias "$namewant"
+
fi
if [ $ret -ne 0 ]; then
- echo "FAIL: set interface alias $devdummy to $namewant"
+ end_test "FAIL: set interface alias $devdummy to $namewant"
return 1
fi
- echo "PASS: set ifalias $namewant for $devdummy"
+ end_test "PASS: set ifalias $namewant for $devdummy"
}
kci_test_vrf()
{
vrfname="test-vrf"
local ret=0
-
- ip link show type vrf 2>/dev/null
+ run_cmd ip link show type vrf
if [ $? -ne 0 ]; then
- echo "SKIP: vrf: iproute2 too old"
+ end_test "SKIP: vrf: iproute2 too old"
return $ksft_skip
fi
-
- ip link add "$vrfname" type vrf table 10
- check_err $?
+ run_cmd ip link add "$vrfname" type vrf table 10
if [ $ret -ne 0 ];then
- echo "FAIL: can't add vrf interface, skipping test"
+ end_test "FAIL: can't add vrf interface, skipping test"
return 0
fi
-
- ip -br link show type vrf | grep -q "$vrfname"
- check_err $?
+ run_cmd_grep "$vrfname" ip -br link show type vrf
if [ $ret -ne 0 ];then
- echo "FAIL: created vrf device not found"
+ end_test "FAIL: created vrf device not found"
return 1
fi
- ip link set dev "$vrfname" up
- check_err $?
-
- ip link set dev "$devdummy" master "$vrfname"
- check_err $?
- ip link del dev "$vrfname"
- check_err $?
+ run_cmd ip link set dev "$vrfname" up
+ run_cmd ip link set dev "$devdummy" master "$vrfname"
+ run_cmd ip link del dev "$vrfname"
if [ $ret -ne 0 ];then
- echo "FAIL: vrf"
+ end_test "FAIL: vrf"
return 1
fi
- echo "PASS: vrf"
+ end_test "PASS: vrf"
}
kci_test_encap_vxlan()
@@ -454,84 +441,44 @@ kci_test_encap_vxlan()
vxlan="test-vxlan0"
vlan="test-vlan0"
testns="$1"
-
- ip -netns "$testns" link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
- dev "$devdummy" dstport 4789 2>/dev/null
+ run_cmd ip -netns "$testns" link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
+ dev "$devdummy" dstport 4789
if [ $? -ne 0 ]; then
- echo "FAIL: can't add vxlan interface, skipping test"
+ end_test "FAIL: can't add vxlan interface, skipping test"
return 0
fi
- check_err $?
- ip -netns "$testns" addr add 10.2.11.49/24 dev "$vxlan"
- check_err $?
-
- ip -netns "$testns" link set up dev "$vxlan"
- check_err $?
-
- ip -netns "$testns" link add link "$vxlan" name "$vlan" type vlan id 1
- check_err $?
+ run_cmd ip -netns "$testns" addr add 10.2.11.49/24 dev "$vxlan"
+ run_cmd ip -netns "$testns" link set up dev "$vxlan"
+ run_cmd ip -netns "$testns" link add link "$vxlan" name "$vlan" type vlan id 1
# changelink testcases
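+ # Only a few attributes (ttl, learning) may be changed on an existing
+ # vxlan device; every other changelink request must be rejected.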
- ip -netns "$testns" link set dev "$vxlan" type vxlan vni 43 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan group ffe5::5 dev "$devdummy" 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan ttl inherit 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan ttl 64
- check_err $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan nolearning
- check_err $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan proxy 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan norsc 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan l2miss 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan l3miss 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan external 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan udpcsum 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumtx 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumrx 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumtx 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumrx 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan gbp 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link set dev "$vxlan" type vxlan gpe 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" link del "$vxlan"
- check_err $?
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan vni 43
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan group ffe5::5 dev "$devdummy"
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan ttl inherit
+
+ run_cmd ip -netns "$testns" link set dev "$vxlan" type vxlan ttl 64
+ run_cmd ip -netns "$testns" link set dev "$vxlan" type vxlan nolearning
+
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan proxy
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan norsc
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan l2miss
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan l3miss
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan external
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan udpcsum
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumtx
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumrx
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumtx
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumrx
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan gbp
+ run_cmd_fail ip -netns "$testns" link set dev "$vxlan" type vxlan gpe
+ run_cmd ip -netns "$testns" link del "$vxlan"
if [ $ret -ne 0 ]; then
- echo "FAIL: vxlan"
+ end_test "FAIL: vxlan"
return 1
fi
- echo "PASS: vxlan"
+ end_test "PASS: vxlan"
}
kci_test_encap_fou()
@@ -539,39 +486,32 @@ kci_test_encap_fou()
local ret=0
name="test-fou"
testns="$1"
-
- ip fou help 2>&1 |grep -q 'Usage: ip fou'
+ run_cmd_grep 'Usage: ip fou' ip fou help
if [ $? -ne 0 ];then
- echo "SKIP: fou: iproute2 too old"
+ end_test "SKIP: fou: iproute2 too old"
return $ksft_skip
fi
if ! /sbin/modprobe -q -n fou; then
- echo "SKIP: module fou is not found"
+ end_test "SKIP: module fou is not found"
return $ksft_skip
fi
/sbin/modprobe -q fou
- ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
+
+ run_cmd ip -netns "$testns" fou add port 7777 ipproto 47
if [ $? -ne 0 ];then
- echo "FAIL: can't add fou port 7777, skipping test"
+ end_test "FAIL: can't add fou port 7777, skipping test"
return 1
fi
-
- ip -netns "$testns" fou add port 8888 ipproto 4
- check_err $?
-
- ip -netns "$testns" fou del port 9999 2>/dev/null
- check_fail $?
-
- ip -netns "$testns" fou del port 7777
- check_err $?
-
+ run_cmd ip -netns "$testns" fou add port 8888 ipproto 4
+ run_cmd_fail ip -netns "$testns" fou del port 9999
+ run_cmd ip -netns "$testns" fou del port 7777
if [ $ret -ne 0 ]; then
- echo "FAIL: fou"
+ end_test "FAIL: fou"s
return 1
fi
- echo "PASS: fou"
+ end_test "PASS: fou"
}
# test various encap methods, use netns to avoid unwanted interference
@@ -579,25 +519,16 @@ kci_test_encap()
{
testns="testns"
local ret=0
-
- ip netns add "$testns"
+ run_cmd ip netns add "$testns"
if [ $? -ne 0 ]; then
- echo "SKIP encap tests: cannot add net namespace $testns"
+ end_test "SKIP encap tests: cannot add net namespace $testns"
return $ksft_skip
fi
-
- ip -netns "$testns" link set lo up
- check_err $?
-
- ip -netns "$testns" link add name "$devdummy" type dummy
- check_err $?
- ip -netns "$testns" link set "$devdummy" up
- check_err $?
-
- kci_test_encap_vxlan "$testns"
- check_err $?
- kci_test_encap_fou "$testns"
- check_err $?
+ run_cmd ip -netns "$testns" link set lo up
+ run_cmd ip -netns "$testns" link add name "$devdummy" type dummy
+ run_cmd ip -netns "$testns" link set "$devdummy" up
+ run_cmd kci_test_encap_vxlan "$testns"
+ run_cmd kci_test_encap_fou "$testns"
ip netns del "$testns"
return $ret
@@ -607,41 +538,28 @@ kci_test_macsec()
{
msname="test_macsec0"
local ret=0
-
- ip macsec help 2>&1 | grep -q "^Usage: ip macsec"
+ run_cmd_grep "^Usage: ip macsec" ip macsec help
if [ $? -ne 0 ]; then
- echo "SKIP: macsec: iproute2 too old"
+ end_test "SKIP: macsec: iproute2 too old"
return $ksft_skip
fi
-
- ip link add link "$devdummy" "$msname" type macsec port 42 encrypt on
- check_err $?
+ run_cmd ip link add link "$devdummy" "$msname" type macsec port 42 encrypt on
if [ $ret -ne 0 ];then
- echo "FAIL: can't add macsec interface, skipping test"
+ end_test "FAIL: can't add macsec interface, skipping test"
return 1
fi
-
- ip macsec add "$msname" tx sa 0 pn 1024 on key 01 12345678901234567890123456789012
- check_err $?
-
- ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef"
- check_err $?
-
- ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef" sa 0 pn 1 on key 00 0123456789abcdef0123456789abcdef
- check_err $?
-
- ip macsec show > /dev/null
- check_err $?
-
- ip link del dev "$msname"
- check_err $?
+ run_cmd ip macsec add "$msname" tx sa 0 pn 1024 on key 01 12345678901234567890123456789012
+ run_cmd ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef"
+ run_cmd ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef" sa 0 pn 1 on key 00 0123456789abcdef0123456789abcdef
+ run_cmd ip macsec show
+ run_cmd ip link del dev "$msname"
if [ $ret -ne 0 ];then
- echo "FAIL: macsec"
+ end_test "FAIL: macsec"
return 1
fi
- echo "PASS: macsec"
+ end_test "PASS: macsec"
}
kci_test_macsec_offload()
@@ -650,19 +568,18 @@ kci_test_macsec_offload()
sysfsnet=/sys/bus/netdevsim/devices/netdevsim0/net/
probed=false
local ret=0
-
- ip macsec help 2>&1 | grep -q "^Usage: ip macsec"
+ run_cmd_grep "^Usage: ip macsec" ip macsec help
if [ $? -ne 0 ]; then
- echo "SKIP: macsec: iproute2 too old"
+ end_test "SKIP: macsec: iproute2 too old"
return $ksft_skip
fi
# setup netdevsim since dummydev doesn't have offload support
if [ ! -w /sys/bus/netdevsim/new_device ] ; then
- modprobe -q netdevsim
- check_err $?
+ run_cmd modprobe -q netdevsim
+
if [ $ret -ne 0 ]; then
- echo "SKIP: macsec_offload can't load netdevsim"
+ end_test "SKIP: macsec_offload can't load netdevsim"
return $ksft_skip
fi
probed=true
@@ -675,43 +592,25 @@ kci_test_macsec_offload()
ip link set $dev up
if [ ! -d $sysfsd ] ; then
- echo "FAIL: macsec_offload can't create device $dev"
+ end_test "FAIL: macsec_offload can't create device $dev"
return 1
fi
-
- ethtool -k $dev | grep -q 'macsec-hw-offload: on'
+ run_cmd_grep 'macsec-hw-offload: on' ethtool -k $dev
if [ $? -eq 1 ] ; then
- echo "FAIL: macsec_offload netdevsim doesn't support MACsec offload"
+ end_test "FAIL: macsec_offload netdevsim doesn't support MACsec offload"
return 1
fi
-
- ip link add link $dev kci_macsec1 type macsec port 4 offload mac
- check_err $?
-
- ip link add link $dev kci_macsec2 type macsec address "aa:bb:cc:dd:ee:ff" port 5 offload mac
- check_err $?
-
- ip link add link $dev kci_macsec3 type macsec sci abbacdde01020304 offload mac
- check_err $?
-
- ip link add link $dev kci_macsec4 type macsec port 8 offload mac 2> /dev/null
- check_fail $?
+ run_cmd ip link add link $dev kci_macsec1 type macsec port 4 offload mac
+ run_cmd ip link add link $dev kci_macsec2 type macsec address "aa:bb:cc:dd:ee:ff" port 5 offload mac
+ run_cmd ip link add link $dev kci_macsec3 type macsec sci abbacdde01020304 offload mac
+ run_cmd_fail ip link add link $dev kci_macsec4 type macsec port 8 offload mac
msname=kci_macsec1
-
- ip macsec add "$msname" tx sa 0 pn 1024 on key 01 12345678901234567890123456789012
- check_err $?
-
- ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef"
- check_err $?
-
- ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef" sa 0 pn 1 on \
+ run_cmd ip macsec add "$msname" tx sa 0 pn 1024 on key 01 12345678901234567890123456789012
+ run_cmd ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef"
+ run_cmd ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef" sa 0 pn 1 on \
key 00 0123456789abcdef0123456789abcdef
- check_err $?
-
- ip macsec add "$msname" rx port 1235 address "1c:ed:de:ad:be:ef" 2> /dev/null
- check_fail $?
-
+ run_cmd_fail ip macsec add "$msname" rx port 1235 address "1c:ed:de:ad:be:ef"
# clean up any leftovers
for msdev in kci_macsec{1,2,3,4} ; do
ip link del $msdev 2> /dev/null
@@ -720,10 +619,10 @@ kci_test_macsec_offload()
$probed && rmmod netdevsim
if [ $ret -ne 0 ]; then
- echo "FAIL: macsec_offload"
+ end_test "FAIL: macsec_offload"
return 1
fi
- echo "PASS: macsec_offload"
+ end_test "PASS: macsec_offload"
}
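Both the macsec and ipsec offload tests lean on netdevsim because the dummy
device has no offload support; the actual device creation happens in lines
elided from these hunks. The sysfs pattern, for reference (device id 0 is
arbitrary):

    modprobe netdevsim
    echo 0 > /sys/bus/netdevsim/new_device        # creates netdevsim0
    ls /sys/bus/netdevsim/devices/netdevsim0/net/ # enumerate its netdevs
    echo 0 > /sys/bus/netdevsim/del_device        # tear it down again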
#-------------------------------------------------------------------
@@ -755,8 +654,7 @@ kci_test_ipsec()
ip addr add $srcip dev $devdummy
# flush to be sure there's nothing configured
- ip x s flush ; ip x p flush
- check_err $?
+ run_cmd ip x s flush
+ run_cmd ip x p flush
# start the monitor in the background
tmpfile=`mktemp /var/run/ipsectestXXX`
@@ -764,72 +662,57 @@ kci_test_ipsec()
sleep 0.2
ipsecid="proto esp src $srcip dst $dstip spi 0x07"
- ip x s add $ipsecid \
+ run_cmd ip x s add $ipsecid \
mode transport reqid 0x07 replay-window 32 \
$algo sel src $srcip/24 dst $dstip/24
- check_err $?
- lines=`ip x s list | grep $srcip | grep $dstip | wc -l`
- test $lines -eq 2
- check_err $?
- ip x s count | grep -q "SAD count 1"
- check_err $?
+ lines=`ip x s list | grep $srcip | grep $dstip | wc -l`
+ run_cmd test $lines -eq 2
+ run_cmd_grep "SAD count 1" ip x s count
lines=`ip x s get $ipsecid | grep $srcip | grep $dstip | wc -l`
- test $lines -eq 2
- check_err $?
-
- ip x s delete $ipsecid
- check_err $?
+ run_cmd test $lines -eq 2
+ run_cmd ip x s delete $ipsecid
lines=`ip x s list | wc -l`
- test $lines -eq 0
- check_err $?
+ run_cmd test $lines -eq 0
ipsecsel="dir out src $srcip/24 dst $dstip/24"
- ip x p add $ipsecsel \
+ run_cmd ip x p add $ipsecsel \
tmpl proto esp src $srcip dst $dstip \
spi 0x07 mode transport reqid 0x07
- check_err $?
+
lines=`ip x p list | grep $srcip | grep $dstip | wc -l`
- test $lines -eq 2
- check_err $?
+ run_cmd test $lines -eq 2
- ip x p count | grep -q "SPD IN 0 OUT 1 FWD 0"
- check_err $?
+ run_cmd_grep "SPD IN 0 OUT 1 FWD 0" ip x p count
lines=`ip x p get $ipsecsel | grep $srcip | grep $dstip | wc -l`
- test $lines -eq 2
- check_err $?
+ run_cmd test $lines -eq 2
- ip x p delete $ipsecsel
- check_err $?
+ run_cmd ip x p delete $ipsecsel
lines=`ip x p list | wc -l`
- test $lines -eq 0
- check_err $?
+ run_cmd test $lines -eq 0
# check the monitor results
kill $mpid
lines=`wc -l $tmpfile | cut "-d " -f1`
- test $lines -eq 20
- check_err $?
+ run_cmd test $lines -eq 20
rm -rf $tmpfile
# clean up any leftovers
- ip x s flush
- check_err $?
- ip x p flush
- check_err $?
+ run_cmd ip x s flush
+ run_cmd ip x p flush
ip addr del $srcip/32 dev $devdummy
if [ $ret -ne 0 ]; then
- echo "FAIL: ipsec"
+ end_test "FAIL: ipsec"
return 1
fi
- echo "PASS: ipsec"
+ end_test "PASS: ipsec"
}
#-------------------------------------------------------------------
@@ -857,10 +740,9 @@ kci_test_ipsec_offload()
# setup netdevsim since dummydev doesn't have offload support
if [ ! -w /sys/bus/netdevsim/new_device ] ; then
- modprobe -q netdevsim
- check_err $?
+ run_cmd modprobe -q netdevsim
if [ $ret -ne 0 ]; then
- echo "SKIP: ipsec_offload can't load netdevsim"
+ end_test "SKIP: ipsec_offload can't load netdevsim"
return $ksft_skip
fi
probed=true
@@ -874,11 +756,11 @@ kci_test_ipsec_offload()
ip addr add $srcip dev $dev
ip link set $dev up
if [ ! -d $sysfsd ] ; then
- echo "FAIL: ipsec_offload can't create device $dev"
+ end_test "FAIL: ipsec_offload can't create device $dev"
return 1
fi
if [ ! -f $sysfsf ] ; then
- echo "FAIL: ipsec_offload netdevsim doesn't support IPsec offload"
+ end_test "FAIL: ipsec_offload netdevsim doesn't support IPsec offload"
return 1
fi
@@ -886,40 +768,39 @@ kci_test_ipsec_offload()
ip x s flush ; ip x p flush
# create offloaded SAs, both in and out
- ip x p add dir out src $srcip/24 dst $dstip/24 \
+ run_cmd ip x p add dir out src $srcip/24 dst $dstip/24 \
tmpl proto esp src $srcip dst $dstip spi 9 \
mode transport reqid 42
- check_err $?
- ip x p add dir in src $dstip/24 dst $srcip/24 \
+
+ run_cmd ip x p add dir in src $dstip/24 dst $srcip/24 \
tmpl proto esp src $dstip dst $srcip spi 9 \
mode transport reqid 42
- check_err $?
- ip x s add proto esp src $srcip dst $dstip spi 9 \
+ run_cmd ip x s add proto esp src $srcip dst $dstip spi 9 \
mode transport reqid 42 $algo sel src $srcip/24 dst $dstip/24 \
offload dev $dev dir out
- check_err $?
- ip x s add proto esp src $dstip dst $srcip spi 9 \
+
+ run_cmd ip x s add proto esp src $dstip dst $srcip spi 9 \
mode transport reqid 42 $algo sel src $dstip/24 dst $srcip/24 \
offload dev $dev dir in
- check_err $?
+
if [ $ret -ne 0 ]; then
- echo "FAIL: ipsec_offload can't create SA"
+ end_test "FAIL: ipsec_offload can't create SA"
return 1
fi
# does offload show up in ip output
lines=`ip x s list | grep -c "crypto offload parameters: dev $dev dir"`
if [ $lines -ne 2 ] ; then
- echo "FAIL: ipsec_offload SA offload missing from list output"
check_err 1
+ end_test "FAIL: ipsec_offload SA offload missing from list output"
fi
# use ping to exercise the Tx path
ping -I $dev -c 3 -W 1 -i 0 $dstip >/dev/null
# does driver have correct offload info
- diff $sysfsf - << EOF
+ run_cmd diff $sysfsf - << EOF
SA count=2 tx=3
sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000
sa[0] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
@@ -929,7 +810,7 @@ sa[1] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
sa[1] key=0x34333231 38373635 32313039 36353433
EOF
if [ $? -ne 0 ] ; then
- echo "FAIL: ipsec_offload incorrect driver data"
+ end_test "FAIL: ipsec_offload incorrect driver data"
check_err 1
fi
@@ -938,8 +819,8 @@ EOF
ip x p flush
lines=`grep -c "SA count=0" $sysfsf`
if [ $lines -ne 1 ] ; then
- echo "FAIL: ipsec_offload SA not removed from driver"
check_err 1
+ end_test "FAIL: ipsec_offload SA not removed from driver"
fi
# clean up any leftovers
@@ -947,10 +828,10 @@ EOF
$probed && rmmod netdevsim
if [ $ret -ne 0 ]; then
- echo "FAIL: ipsec_offload"
+ end_test "FAIL: ipsec_offload"
return 1
fi
- echo "PASS: ipsec_offload"
+ end_test "PASS: ipsec_offload"
}
kci_test_gretap()
@@ -959,46 +840,38 @@ kci_test_gretap()
DEV_NS=gretap00
local ret=0
- ip netns add "$testns"
+ run_cmd ip netns add "$testns"
if [ $? -ne 0 ]; then
- echo "SKIP gretap tests: cannot add net namespace $testns"
+ end_test "SKIP gretap tests: cannot add net namespace $testns"
return $ksft_skip
fi
- ip link help gretap 2>&1 | grep -q "^Usage:"
+ run_cmd_grep "^Usage:" ip link help gretap
if [ $? -ne 0 ];then
- echo "SKIP: gretap: iproute2 too old"
+ end_test "SKIP: gretap: iproute2 too old"
ip netns del "$testns"
return $ksft_skip
fi
# test native tunnel
- ip -netns "$testns" link add dev "$DEV_NS" type gretap seq \
+ run_cmd ip -netns "$testns" link add dev "$DEV_NS" type gretap seq \
key 102 local 172.16.1.100 remote 172.16.1.200
- check_err $?
-
- ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
- check_err $?
- ip -netns "$testns" link set dev $DEV_NS up
- check_err $?
- ip -netns "$testns" link del "$DEV_NS"
- check_err $?
+ run_cmd ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
+ run_cmd ip -netns "$testns" link set dev $DEV_NS up
+ run_cmd ip -netns "$testns" link del "$DEV_NS"
# test external mode
- ip -netns "$testns" link add dev "$DEV_NS" type gretap external
- check_err $?
-
- ip -netns "$testns" link del "$DEV_NS"
- check_err $?
+ run_cmd ip -netns "$testns" link add dev "$DEV_NS" type gretap external
+ run_cmd ip -netns "$testns" link del "$DEV_NS"
if [ $ret -ne 0 ]; then
- echo "FAIL: gretap"
+ end_test "FAIL: gretap"
ip netns del "$testns"
return 1
fi
- echo "PASS: gretap"
+ end_test "PASS: gretap"
ip netns del "$testns"
}
@@ -1009,46 +882,38 @@ kci_test_ip6gretap()
DEV_NS=ip6gretap00
local ret=0
- ip netns add "$testns"
+ run_cmd ip netns add "$testns"
if [ $? -ne 0 ]; then
- echo "SKIP ip6gretap tests: cannot add net namespace $testns"
+ end_test "SKIP ip6gretap tests: cannot add net namespace $testns"
return $ksft_skip
fi
- ip link help ip6gretap 2>&1 | grep -q "^Usage:"
+ run_cmd_grep "^Usage:" ip link help ip6gretap
if [ $? -ne 0 ];then
- echo "SKIP: ip6gretap: iproute2 too old"
+ end_test "SKIP: ip6gretap: iproute2 too old"
ip netns del "$testns"
return $ksft_skip
fi
# test native tunnel
- ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap seq \
+ run_cmd ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap seq \
key 102 local fc00:100::1 remote fc00:100::2
- check_err $?
- ip -netns "$testns" addr add dev "$DEV_NS" fc00:200::1/96
- check_err $?
- ip -netns "$testns" link set dev $DEV_NS up
- check_err $?
-
- ip -netns "$testns" link del "$DEV_NS"
- check_err $?
+ run_cmd ip -netns "$testns" addr add dev "$DEV_NS" fc00:200::1/96
+ run_cmd ip -netns "$testns" link set dev $DEV_NS up
+ run_cmd ip -netns "$testns" link del "$DEV_NS"
# test external mode
- ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap external
- check_err $?
-
- ip -netns "$testns" link del "$DEV_NS"
- check_err $?
+ run_cmd ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap external
+ run_cmd ip -netns "$testns" link del "$DEV_NS"
if [ $ret -ne 0 ]; then
- echo "FAIL: ip6gretap"
+ end_test "FAIL: ip6gretap"
ip netns del "$testns"
return 1
fi
- echo "PASS: ip6gretap"
+ end_test "PASS: ip6gretap"
ip netns del "$testns"
}
@@ -1058,62 +923,47 @@ kci_test_erspan()
testns="testns"
DEV_NS=erspan00
local ret=0
-
- ip link help erspan 2>&1 | grep -q "^Usage:"
+ run_cmd_grep "^Usage:" ip link help erspan
if [ $? -ne 0 ];then
- echo "SKIP: erspan: iproute2 too old"
+ end_test "SKIP: erspan: iproute2 too old"
return $ksft_skip
fi
-
- ip netns add "$testns"
+ run_cmd ip netns add "$testns"
if [ $? -ne 0 ]; then
- echo "SKIP erspan tests: cannot add net namespace $testns"
+ end_test "SKIP erspan tests: cannot add net namespace $testns"
return $ksft_skip
fi
# test native tunnel erspan v1
- ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
+ run_cmd ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 1 erspan 488
- check_err $?
- ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
- check_err $?
- ip -netns "$testns" link set dev $DEV_NS up
- check_err $?
-
- ip -netns "$testns" link del "$DEV_NS"
- check_err $?
+ run_cmd ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
+ run_cmd ip -netns "$testns" link set dev $DEV_NS up
+ run_cmd ip -netns "$testns" link del "$DEV_NS"
# test native tunnel erspan v2
- ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
+ run_cmd ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
- check_err $?
-
- ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
- check_err $?
- ip -netns "$testns" link set dev $DEV_NS up
- check_err $?
- ip -netns "$testns" link del "$DEV_NS"
- check_err $?
+ run_cmd ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
+ run_cmd ip -netns "$testns" link set dev $DEV_NS up
+ run_cmd ip -netns "$testns" link del "$DEV_NS"
# test external mode
- ip -netns "$testns" link add dev "$DEV_NS" type erspan external
- check_err $?
-
- ip -netns "$testns" link del "$DEV_NS"
- check_err $?
+ run_cmd ip -netns "$testns" link add dev "$DEV_NS" type erspan external
+ run_cmd ip -netns "$testns" link del "$DEV_NS"
if [ $ret -ne 0 ]; then
- echo "FAIL: erspan"
+ end_test "FAIL: erspan"
ip netns del "$testns"
return 1
fi
- echo "PASS: erspan"
+ end_test "PASS: erspan"
ip netns del "$testns"
}
@@ -1123,63 +973,49 @@ kci_test_ip6erspan()
testns="testns"
DEV_NS=ip6erspan00
local ret=0
-
- ip link help ip6erspan 2>&1 | grep -q "^Usage:"
+ run_cmd_grep "^Usage:" ip link help ip6erspan
if [ $? -ne 0 ];then
- echo "SKIP: ip6erspan: iproute2 too old"
+ end_test "SKIP: ip6erspan: iproute2 too old"
return $ksft_skip
fi
-
- ip netns add "$testns"
+ run_cmd ip netns add "$testns"
if [ $? -ne 0 ]; then
- echo "SKIP ip6erspan tests: cannot add net namespace $testns"
+ end_test "SKIP ip6erspan tests: cannot add net namespace $testns"
return $ksft_skip
fi
# test native tunnel ip6erspan v1
- ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
+ run_cmd ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 1 erspan 488
- check_err $?
- ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
- check_err $?
- ip -netns "$testns" link set dev $DEV_NS up
- check_err $?
-
- ip -netns "$testns" link del "$DEV_NS"
- check_err $?
+ run_cmd ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
+ run_cmd ip -netns "$testns" link set dev $DEV_NS up
+ run_cmd ip -netns "$testns" link del "$DEV_NS"
# test native tunnel ip6erspan v2
- ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
+ run_cmd ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
- check_err $?
- ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
- check_err $?
- ip -netns "$testns" link set dev $DEV_NS up
- check_err $?
-
- ip -netns "$testns" link del "$DEV_NS"
- check_err $?
+ run_cmd ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
+ run_cmd ip -netns "$testns" link set dev $DEV_NS up
+ run_cmd ip -netns "$testns" link del "$DEV_NS"
# test external mode
- ip -netns "$testns" link add dev "$DEV_NS" \
+ run_cmd ip -netns "$testns" link add dev "$DEV_NS" \
type ip6erspan external
- check_err $?
- ip -netns "$testns" link del "$DEV_NS"
- check_err $?
+ run_cmd ip -netns "$testns" link del "$DEV_NS"
if [ $ret -ne 0 ]; then
- echo "FAIL: ip6erspan"
+ end_test "FAIL: ip6erspan"
ip netns del "$testns"
return 1
fi
- echo "PASS: ip6erspan"
+ end_test "PASS: ip6erspan"
ip netns del "$testns"
}
@@ -1195,45 +1031,35 @@ kci_test_fdb_get()
dstip="10.0.2.3"
local ret=0
- bridge fdb help 2>&1 |grep -q 'bridge fdb get'
+ run_cmd_grep 'bridge fdb get' bridge fdb help
if [ $? -ne 0 ];then
- echo "SKIP: fdb get tests: iproute2 too old"
+ end_test "SKIP: fdb get tests: iproute2 too old"
return $ksft_skip
fi
- ip netns add testns
+ run_cmd ip netns add testns
if [ $? -ne 0 ]; then
- echo "SKIP fdb get tests: cannot add net namespace $testns"
+ end_test "SKIP fdb get tests: cannot add net namespace $testns"
return $ksft_skip
fi
-
- $IP link add "$vxlandev" type vxlan id 10 local $localip \
- dstport 4789 2>/dev/null
- check_err $?
- $IP link add name "$brdev" type bridge &>/dev/null
- check_err $?
- $IP link set dev "$vxlandev" master "$brdev" &>/dev/null
- check_err $?
- $BRIDGE fdb add $test_mac dev "$vxlandev" master &>/dev/null
- check_err $?
- $BRIDGE fdb add $test_mac dev "$vxlandev" dst $dstip self &>/dev/null
- check_err $?
-
- $BRIDGE fdb get $test_mac brport "$vxlandev" 2>/dev/null | grep -q "dev $vxlandev master $brdev"
- check_err $?
- $BRIDGE fdb get $test_mac br "$brdev" 2>/dev/null | grep -q "dev $vxlandev master $brdev"
- check_err $?
- $BRIDGE fdb get $test_mac dev "$vxlandev" self 2>/dev/null | grep -q "dev $vxlandev dst $dstip"
- check_err $?
+ run_cmd $IP link add "$vxlandev" type vxlan id 10 local $localip \
+ dstport 4789
+ run_cmd $IP link add name "$brdev" type bridge
+ run_cmd $IP link set dev "$vxlandev" master "$brdev"
+ run_cmd $BRIDGE fdb add $test_mac dev "$vxlandev" master
+ run_cmd $BRIDGE fdb add $test_mac dev "$vxlandev" dst $dstip self
+ run_cmd_grep "dev $vxlandev master $brdev" $BRIDGE fdb get $test_mac brport "$vxlandev"
+ run_cmd_grep "dev $vxlandev master $brdev" $BRIDGE fdb get $test_mac br "$brdev"
+ run_cmd_grep "dev $vxlandev dst $dstip" $BRIDGE fdb get $test_mac dev "$vxlandev" self
ip netns del testns &>/dev/null
if [ $ret -ne 0 ]; then
- echo "FAIL: bridge fdb get"
+ end_test "FAIL: bridge fdb get"
return 1
fi
- echo "PASS: bridge fdb get"
+ end_test "PASS: bridge fdb get"
}
kci_test_neigh_get()
@@ -1243,50 +1069,38 @@ kci_test_neigh_get()
dstip6=dead::2
local ret=0
- ip neigh help 2>&1 |grep -q 'ip neigh get'
+ run_cmd_grep 'ip neigh get' ip neigh help
if [ $? -ne 0 ];then
- echo "SKIP: fdb get tests: iproute2 too old"
+ end_test "SKIP: fdb get tests: iproute2 too old"
return $ksft_skip
fi
# ipv4
- ip neigh add $dstip lladdr $dstmac dev "$devdummy" > /dev/null
- check_err $?
- ip neigh get $dstip dev "$devdummy" 2> /dev/null | grep -q "$dstmac"
- check_err $?
- ip neigh del $dstip lladdr $dstmac dev "$devdummy" > /dev/null
- check_err $?
+ run_cmd ip neigh add $dstip lladdr $dstmac dev "$devdummy"
+ run_cmd_grep "$dstmac" ip neigh get $dstip dev "$devdummy"
+ run_cmd ip neigh del $dstip lladdr $dstmac dev "$devdummy"
# ipv4 proxy
- ip neigh add proxy $dstip dev "$devdummy" > /dev/null
- check_err $?
- ip neigh get proxy $dstip dev "$devdummy" 2>/dev/null | grep -q "$dstip"
- check_err $?
- ip neigh del proxy $dstip dev "$devdummy" > /dev/null
- check_err $?
+ run_cmd ip neigh add proxy $dstip dev "$devdummy"
+ run_cmd_grep "$dstip" ip neigh get proxy $dstip dev "$devdummy"
+ run_cmd ip neigh del proxy $dstip dev "$devdummy"
# ipv6
- ip neigh add $dstip6 lladdr $dstmac dev "$devdummy" > /dev/null
- check_err $?
- ip neigh get $dstip6 dev "$devdummy" 2> /dev/null | grep -q "$dstmac"
- check_err $?
- ip neigh del $dstip6 lladdr $dstmac dev "$devdummy" > /dev/null
- check_err $?
+ run_cmd ip neigh add $dstip6 lladdr $dstmac dev "$devdummy"
+ run_cmd_grep "$dstmac" ip neigh get $dstip6 dev "$devdummy"
+ run_cmd ip neigh del $dstip6 lladdr $dstmac dev "$devdummy"
# ipv6 proxy
- ip neigh add proxy $dstip6 dev "$devdummy" > /dev/null
- check_err $?
- ip neigh get proxy $dstip6 dev "$devdummy" 2>/dev/null | grep -q "$dstip6"
- check_err $?
- ip neigh del proxy $dstip6 dev "$devdummy" > /dev/null
- check_err $?
+ run_cmd ip neigh add proxy $dstip6 dev "$devdummy"
+ run_cmd_grep "$dstip6" ip neigh get proxy $dstip6 dev "$devdummy"
+ run_cmd ip neigh del proxy $dstip6 dev "$devdummy"
if [ $ret -ne 0 ];then
- echo "FAIL: neigh get"
+ end_test "FAIL: neigh get"
return 1
fi
- echo "PASS: neigh get"
+ end_test "PASS: neigh get"
}
kci_test_bridge_parent_id()
@@ -1296,10 +1110,9 @@ kci_test_bridge_parent_id()
probed=false
if [ ! -w /sys/bus/netdevsim/new_device ] ; then
- modprobe -q netdevsim
- check_err $?
+ run_cmd modprobe -q netdevsim
if [ $ret -ne 0 ]; then
- echo "SKIP: bridge_parent_id can't load netdevsim"
+ end_test "SKIP: bridge_parent_id can't load netdevsim"
return $ksft_skip
fi
probed=true
@@ -1312,13 +1125,11 @@ kci_test_bridge_parent_id()
udevadm settle
dev10=`ls ${sysfsnet}10/net/`
dev20=`ls ${sysfsnet}20/net/`
-
- ip link add name test-bond0 type bond mode 802.3ad
- ip link set dev $dev10 master test-bond0
- ip link set dev $dev20 master test-bond0
- ip link add name test-br0 type bridge
- ip link set dev test-bond0 master test-br0
- check_err $?
+ run_cmd ip link add name test-bond0 type bond mode 802.3ad
+ run_cmd ip link set dev $dev10 master test-bond0
+ run_cmd ip link set dev $dev20 master test-bond0
+ run_cmd ip link add name test-br0 type bridge
+ run_cmd ip link set dev test-bond0 master test-br0
# clean up any leftovers
ip link del dev test-br0
@@ -1328,10 +1139,10 @@ kci_test_bridge_parent_id()
$probed && rmmod netdevsim
if [ $ret -ne 0 ]; then
- echo "FAIL: bridge_parent_id"
+ end_test "FAIL: bridge_parent_id"
return 1
fi
- echo "PASS: bridge_parent_id"
+ end_test "PASS: bridge_parent_id"
}
address_get_proto()
@@ -1409,10 +1220,10 @@ do_test_address_proto()
ip address del dev "$devdummy" "$addr3"
if [ $ret -ne 0 ]; then
- echo "FAIL: address proto $what"
+ end_test "FAIL: address proto $what"
return 1
fi
- echo "PASS: address proto $what"
+ end_test "PASS: address proto $what"
}
kci_test_address_proto()
@@ -1435,7 +1246,7 @@ kci_test_rtnl()
kci_add_dummy
if [ $ret -ne 0 ];then
- echo "FAIL: cannot add dummy interface"
+ end_test "FAIL: cannot add dummy interface"
return 1
fi
@@ -1455,31 +1266,39 @@ usage: ${0##*/} OPTS
-t <test> Test(s) to run (default: all)
(options: $(echo $ALL_TESTS))
+ -v Verbose mode (show commands and output)
+ -P Pause after every test
+ -p Pause after every failing test before cleanup (for debugging)
EOF
}
#check for needed privileges
if [ "$(id -u)" -ne 0 ];then
- echo "SKIP: Need root privileges"
+ end_test "SKIP: Need root privileges"
exit $ksft_skip
fi
for x in ip tc;do
$x -Version 2>/dev/null >/dev/null
if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without the $x tool"
+ end_test "SKIP: Could not run test without the $x tool"
exit $ksft_skip
fi
done
-while getopts t:h o; do
+while getopts t:hvpP o; do
case $o in
t) TESTS=$OPTARG;;
+ v) VERBOSE=1;;
+ p) PAUSE_ON_FAIL=yes;;
+ P) PAUSE=yes;;
h) usage; exit 0;;
*) usage; exit 1;;
esac
done
+[ "$PAUSE" = "yes" ] && PAUSE_ON_FAIL="no"
+
kci_test_rtnl
exit $?
diff --git a/tools/testing/selftests/netfilter/nf_nat_edemux.sh b/tools/testing/selftests/netfilter/nf_nat_edemux.sh
index 1092bbcb1fba..a1aa8f4a5828 100755
--- a/tools/testing/selftests/netfilter/nf_nat_edemux.sh
+++ b/tools/testing/selftests/netfilter/nf_nat_edemux.sh
@@ -11,16 +11,18 @@ ret=0
sfx=$(mktemp -u "XXXXXXXX")
ns1="ns1-$sfx"
ns2="ns2-$sfx"
+socatpid=0
cleanup()
{
+ [ $socatpid -gt 0 ] && kill $socatpid
ip netns del $ns1
ip netns del $ns2
}
-iperf3 -v > /dev/null 2>&1
+socat -h > /dev/null 2>&1
if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without iperf3"
+ echo "SKIP: Could not run test without socat"
exit $ksft_skip
fi
@@ -60,8 +62,8 @@ ip netns exec $ns2 ip link set up dev veth2
ip netns exec $ns2 ip addr add 192.168.1.2/24 dev veth2
# Create a server in one namespace
-ip netns exec $ns1 iperf3 -s > /dev/null 2>&1 &
-iperfs=$!
+ip netns exec $ns1 socat -u TCP-LISTEN:5201,fork OPEN:/dev/null,wronly=1 &
+socatpid=$!
# Restrict source port to just one so we don't have to exhaust
# all others.
@@ -83,17 +85,43 @@ sleep 1
# ip daddr:dport will be rewritten to 192.168.1.1 5201
# NAT must reallocate source port 10000 because
# 192.168.1.2:10000 -> 192.168.1.1:5201 is already in use
-echo test | ip netns exec $ns2 socat -t 3 -u STDIN TCP:10.96.0.1:443 >/dev/null
+echo test | ip netns exec $ns2 socat -t 3 -u STDIN TCP:10.96.0.1:443,connect-timeout=3 >/dev/null
ret=$?
-kill $iperfs
-
# Check socat can connect to 10.96.0.1:443 (aka 192.168.1.1:5201).
if [ $ret -eq 0 ]; then
echo "PASS: socat can connect via NAT'd address"
else
echo "FAIL: socat cannot connect via NAT'd address"
- exit 1
fi
-exit 0
+# Check source port clash resolution.
+ip netns exec $ns1 iptables -t nat -A PREROUTING -p tcp --dport 5202 -j REDIRECT --to-ports 5201
+ip netns exec $ns1 iptables -t nat -A PREROUTING -p tcp --dport 5203 -j REDIRECT --to-ports 5201
+
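+# Keep the first connection open for ~5s (sleep holds socat's stdin open)
+# so its NAT mapping is still live when the second client connects.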
+sleep 5 | ip netns exec $ns2 socat -t 5 -u STDIN TCP:192.168.1.1:5202,connect-timeout=5 >/dev/null &
+cpid1=$!
+sleep 1
+
+# if connect succeeds, client closes instantly due to EOF on stdin.
+# if connect hangs, it will time out after 5s.
+echo | ip netns exec $ns2 socat -t 3 -u STDIN TCP:192.168.1.1:5203,connect-timeout=5 >/dev/null &
+cpid2=$!
+
+time_then=$(date +%s)
+wait $cpid2
+rv=$?
+time_now=$(date +%s)
+
+# Check how much time has elapsed: 'cpid2' is expected to connect and
+# exit immediately, without any connect delay.
+delta=$((time_now - time_then))
+
+if [ $delta -lt 2 -a $rv -eq 0 ]; then
+ echo "PASS: could connect to service via redirected ports"
+else
+ echo "FAIL: socat cannot connect to service via redirect ($delta seconds elapsed, returned $rv)"
+ ret=1
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/tc-testing/README b/tools/testing/selftests/tc-testing/README
index b0954c873e2f..be7b00799b3e 100644
--- a/tools/testing/selftests/tc-testing/README
+++ b/tools/testing/selftests/tc-testing/README
@@ -9,8 +9,7 @@ execute them inside a network namespace dedicated to the task.
REQUIREMENTS
------------
-* Minimum Python version of 3.4. Earlier 3.X versions may work but are not
- guaranteed.
+* Minimum Python version of 3.8.
* The kernel must have network namespace support if using nsPlugin
@@ -96,6 +95,15 @@ the stdout with a regular expression.
Each of the commands in any stage will run in a shell instance.
+Each test is an atomic unit. A test that, for whatever reason, spans multiple
+test definitions is a bug.
+
+A test that runs inside a network namespace (one that requires "nsPlugin")
+will run in parallel with other namespaced tests.
+
+Tests that use netdevsim, or that don't run inside a namespace, run serially
+with respect to each other.
+
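+For example, a test case opts into namespace execution through its plugin
+metadata (the same stanza the .json updates later in this patch add to the
+existing action tests):
+
+    "plugins": {
+        "requires": "nsPlugin"
+    },
+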
USER-DEFINED CONSTANTS
----------------------
@@ -116,59 +124,6 @@ COMMAND LINE ARGUMENTS
Run tdc.py -h to see the full list of available arguments.
-usage: tdc.py [-h] [-p PATH] [-D DIR [DIR ...]] [-f FILE [FILE ...]]
- [-c [CATG [CATG ...]]] [-e ID [ID ...]] [-l] [-s] [-i] [-v] [-N]
- [-d DEVICE] [-P] [-n] [-V]
-
-Linux TC unit tests
-
-optional arguments:
- -h, --help show this help message and exit
- -p PATH, --path PATH The full path to the tc executable to use
- -v, --verbose Show the commands that are being run
- -N, --notap Suppress tap results for command under test
- -d DEVICE, --device DEVICE
- Execute test cases that use a physical device, where
- DEVICE is its name. (If not defined, tests that require
- a physical device will be skipped)
- -P, --pause Pause execution just before post-suite stage
-
-selection:
- select which test cases: files plus directories; filtered by categories
- plus testids
-
- -D DIR [DIR ...], --directory DIR [DIR ...]
- Collect tests from the specified directory(ies)
- (default [tc-tests])
- -f FILE [FILE ...], --file FILE [FILE ...]
- Run tests from the specified file(s)
- -c [CATG [CATG ...]], --category [CATG [CATG ...]]
- Run tests only from the specified category/ies, or if
- no category/ies is/are specified, list known
- categories.
- -e ID [ID ...], --execute ID [ID ...]
- Execute the specified test cases with specified IDs
-
-action:
- select action to perform on selected test cases
-
- -l, --list List all test cases, or those only within the
- specified category
- -s, --show Display the selected test cases
- -i, --id Generate ID numbers for new test cases
-
-netns:
- options for nsPlugin (run commands in net namespace)
-
- -N, --no-namespace
- Do not run commands in a network namespace.
-
-valgrind:
- options for valgrindPlugin (run command under test under Valgrind)
-
- -V, --valgrind Run commands under valgrind
-
-
PLUGIN ARCHITECTURE
-------------------
diff --git a/tools/testing/selftests/tc-testing/TdcPlugin.py b/tools/testing/selftests/tc-testing/TdcPlugin.py
index 79f3ca8617c9..aae85ce4f776 100644
--- a/tools/testing/selftests/tc-testing/TdcPlugin.py
+++ b/tools/testing/selftests/tc-testing/TdcPlugin.py
@@ -5,10 +5,10 @@ class TdcPlugin:
super().__init__()
print(' -- {}.__init__'.format(self.sub_class))
- def pre_suite(self, testcount, testidlist):
+ def pre_suite(self, testcount, testlist):
'''run commands before test_runner goes into a test loop'''
self.testcount = testcount
- self.testidlist = testidlist
+ self.testlist = testlist
if self.args.verbose > 1:
print(' -- {}.pre_suite'.format(self.sub_class))
diff --git a/tools/testing/selftests/tc-testing/TdcResults.py b/tools/testing/selftests/tc-testing/TdcResults.py
index 1e4d95fdf8d0..e56817b97f08 100644
--- a/tools/testing/selftests/tc-testing/TdcResults.py
+++ b/tools/testing/selftests/tc-testing/TdcResults.py
@@ -59,7 +59,8 @@ class TestResult:
return self.steps
class TestSuiteReport():
- _testsuite = []
+ def __init__(self):
+ self._testsuite = []
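+        # per-instance list; a class-level attribute would be shared by
+        # every TestSuiteReport instance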
def add_resultdata(self, result_data):
if isinstance(result_data, TestResult):
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
index 9539cffa9e5e..b62429b0fcdb 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
@@ -3,35 +3,96 @@ import signal
from string import Template
import subprocess
import time
+from multiprocessing import Pool
+from functools import cached_property
from TdcPlugin import TdcPlugin
from tdc_config import *
+def prepare_suite(obj, test):
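+    '''Create the per-test namespace and devices ahead of time, under
+    per-test unique names, so nsPlugin test cases can run in parallel.'''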
+ original = obj.args.NAMES
+
+ if 'skip' in test and test['skip'] == 'yes':
+ return
+
+ if 'nsPlugin' not in test['plugins']:
+ return
+
+ shadow = {}
+ shadow['IP'] = original['IP']
+ shadow['TC'] = original['TC']
+ shadow['NS'] = '{}-{}'.format(original['NS'], test['random'])
+ shadow['DEV0'] = '{}id{}'.format(original['DEV0'], test['id'])
+ shadow['DEV1'] = '{}id{}'.format(original['DEV1'], test['id'])
+ shadow['DUMMY'] = '{}id{}'.format(original['DUMMY'], test['id'])
+ shadow['DEV2'] = original['DEV2']
+ obj.args.NAMES = shadow
+
+ if obj.args.namespace:
+ obj._ns_create()
+ else:
+ obj._ports_create()
+
+ # Make sure the netns is visible in the fs
+ while True:
+ obj._proc_check()
+ try:
+ ns = obj.args.NAMES['NS']
+ f = open('/run/netns/{}'.format(ns))
+ f.close()
+ break
+        except OSError:
+ time.sleep(0.1)
+ continue
+
+ obj.args.NAMES = original
+
class SubPlugin(TdcPlugin):
def __init__(self):
self.sub_class = 'ns/SubPlugin'
super().__init__()
- def pre_suite(self, testcount, testidlist):
- '''run commands before test_runner goes into a test loop'''
- super().pre_suite(testcount, testidlist)
+ def pre_suite(self, testcount, testlist):
+ from itertools import cycle
- if self.args.namespace:
- self._ns_create()
- else:
- self._ports_create()
+ super().pre_suite(testcount, testlist)
- def post_suite(self, index):
- '''run commands after test_runner goes into a test loop'''
- super().post_suite(index)
+ print("Setting up namespaces and devices...")
+
+ with Pool(self.args.mp) as p:
+ it = zip(cycle([self]), testlist)
+ p.starmap(prepare_suite, it)
+
+ def pre_case(self, caseinfo, test_skip):
if self.args.verbose:
- print('{}.post_suite'.format(self.sub_class))
+ print('{}.pre_case'.format(self.sub_class))
+
+ if test_skip:
+ return
+
+
+ def post_case(self):
+ if self.args.verbose:
+ print('{}.post_case'.format(self.sub_class))
if self.args.namespace:
self._ns_destroy()
else:
self._ports_destroy()
+ def post_suite(self, index):
+ if self.args.verbose:
+ print('{}.post_suite'.format(self.sub_class))
+
+ # Make sure we don't leak resources
+ for f in os.listdir('/run/netns/'):
+ cmd = self._replace_keywords("$IP netns del {}".format(f))
+
+ if self.args.verbose > 3:
+ print('_exec_cmd: command "{}"'.format(cmd))
+
+ subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+
def add_args(self, parser):
super().add_args(parser)
self.argparser_group = self.argparser.add_argument_group(
@@ -77,18 +138,43 @@ class SubPlugin(TdcPlugin):
print('adjust_command: return command [{}]'.format(command))
return command
- def _ports_create(self):
- cmd = '$IP link add $DEV0 type veth peer name $DEV1'
- self._exec_cmd('pre', cmd)
- cmd = '$IP link set $DEV0 up'
- self._exec_cmd('pre', cmd)
+ def _ports_create_cmds(self):
+ cmds = []
+
+ cmds.append(self._replace_keywords('link add $DEV0 type veth peer name $DEV1'))
+ cmds.append(self._replace_keywords('link set $DEV0 up'))
+ cmds.append(self._replace_keywords('link add $DUMMY type dummy'))
if not self.args.namespace:
- cmd = '$IP link set $DEV1 up'
- self._exec_cmd('pre', cmd)
+ cmds.append(self._replace_keywords('link set $DEV1 up'))
+
+ return cmds
+
+ def _ports_create(self):
+ self._exec_cmd_batched('pre', self._ports_create_cmds())
+
+ def _ports_destroy_cmd(self):
+ return self._replace_keywords('link del $DEV0')
def _ports_destroy(self):
- cmd = '$IP link del $DEV0'
- self._exec_cmd('post', cmd)
+ self._exec_cmd('post', self._ports_destroy_cmd())
+
+ def _ns_create_cmds(self):
+ cmds = []
+
+ if self.args.namespace:
+ ns = self.args.NAMES['NS']
+
+ cmds.append(self._replace_keywords('netns add {}'.format(ns)))
+ cmds.append(self._replace_keywords('link set $DEV1 netns {}'.format(ns)))
+ cmds.append(self._replace_keywords('link set $DUMMY netns {}'.format(ns)))
+ cmds.append(self._replace_keywords('netns exec {} $IP link set $DEV1 up'.format(ns)))
+ cmds.append(self._replace_keywords('netns exec {} $IP link set $DUMMY up'.format(ns)))
+
+ if self.args.device:
+ cmds.append(self._replace_keywords('link set $DEV2 netns {}'.format(ns)))
+ cmds.append(self._replace_keywords('netns exec {} $IP link set $DEV2 up'.format(ns)))
+
+ return cmds
def _ns_create(self):
'''
@@ -96,18 +182,10 @@ class SubPlugin(TdcPlugin):
the required network devices for it.
'''
self._ports_create()
- if self.args.namespace:
- cmd = '$IP netns add {}'.format(self.args.NAMES['NS'])
- self._exec_cmd('pre', cmd)
- cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
- self._exec_cmd('pre', cmd)
- cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
- self._exec_cmd('pre', cmd)
- if self.args.device:
- cmd = '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
- self._exec_cmd('pre', cmd)
- cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
- self._exec_cmd('pre', cmd)
+ self._exec_cmd_batched('pre', self._ns_create_cmds())
+
+ def _ns_destroy_cmd(self):
+ return self._replace_keywords('netns delete {}'.format(self.args.NAMES['NS']))
def _ns_destroy(self):
'''
@@ -115,35 +193,49 @@ class SubPlugin(TdcPlugin):
devices as well)
'''
if self.args.namespace:
- cmd = '$IP netns delete {}'.format(self.args.NAMES['NS'])
- self._exec_cmd('post', cmd)
+ self._exec_cmd('post', self._ns_destroy_cmd())
+ self._ports_destroy()
+
+ @cached_property
+ def _proc(self):
+ ip = self._replace_keywords("$IP -b -")
+ proc = subprocess.Popen(ip,
+ shell=True,
+ stdin=subprocess.PIPE,
+ env=ENVIR)
+
+ return proc
+
+ def _proc_check(self):
+ proc = self._proc
+
+ proc.poll()
+
+ if proc.returncode is not None and proc.returncode != 0:
+ raise RuntimeError("iproute2 exited with an error code")
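
The cached _proc above keeps a single iproute2 process alive in batch mode
($IP -b -, i.e. ip -batch - reading one command per line from stdin), so each
setup command costs a pipe write instead of a fork/exec. The same mechanism,
illustrated standalone from a shell (device names here are made up for the
example):

    # Feed several commands to one ip process; add -force to keep going
    # past errors instead of stopping at the first one.
    printf '%s\n' \
        'link add tdc-ex0 type veth peer name tdc-ex1' \
        'link set tdc-ex0 up' \
        'link del tdc-ex0' \
    | ip -b -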
def _exec_cmd(self, stage, command):
'''
Perform any required modifications on an executable command, then run
it in a subprocess and return the results.
'''
- if '$' in command:
- command = self._replace_keywords(command)
- self.adjust_command(stage, command)
- if self.args.verbose:
+ if self.args.verbose > 3:
print('_exec_cmd: command "{}"'.format(command))
- proc = subprocess.Popen(command,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=ENVIR)
- (rawout, serr) = proc.communicate()
- if proc.returncode != 0 and len(serr) > 0:
- foutput = serr.decode("utf-8")
- else:
- foutput = rawout.decode("utf-8")
+ proc = self._proc
+
+ proc.stdin.write((command + '\n').encode())
+ proc.stdin.flush()
+
+ if self.args.verbose > 3:
+ print('_exec_cmd proc: {}'.format(proc))
+
+ self._proc_check()
- proc.stdout.close()
- proc.stderr.close()
- return proc, foutput
+ def _exec_cmd_batched(self, stage, commands):
+ for cmd in commands:
+ self._exec_cmd(stage, cmd)
def _replace_keywords(self, cmd):
"""
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/rootPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/rootPlugin.py
index e36775bd4d12..8762c0f4a095 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/rootPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/rootPlugin.py
@@ -10,9 +10,9 @@ class SubPlugin(TdcPlugin):
self.sub_class = 'root/SubPlugin'
super().__init__()
- def pre_suite(self, testcount, testidlist):
+ def pre_suite(self, testcount, testlist):
# run commands before test_runner goes into a test loop
- super().pre_suite(testcount, testidlist)
+ super().pre_suite(testcount, testlist)
if os.geteuid():
print('This script must be run with root privileges', file=sys.stderr)
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
index 4bb866575ea1..c6f61649c430 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
@@ -25,9 +25,10 @@ class SubPlugin(TdcPlugin):
self._tsr = TestSuiteReport()
super().__init__()
- def pre_suite(self, testcount, testidlist):
+ def pre_suite(self, testcount, testlist):
'''run commands before test_runner goes into a test loop'''
- super().pre_suite(testcount, testidlist)
+ self.testidlist = [tidx['id'] for tidx in testlist]
+ super().pre_suite(testcount, testlist)
if self.args.verbose > 1:
print('{}.pre_suite'.format(self.sub_class))
if self.args.valgrind:
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
index 0de2f79ea329..3d0f9310bde4 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
@@ -6,6 +6,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -30,6 +33,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -54,6 +60,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -78,6 +87,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -102,6 +114,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -126,6 +141,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -150,6 +168,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -174,6 +195,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -198,6 +222,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -222,6 +249,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -246,6 +276,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -271,6 +304,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -295,6 +331,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -320,6 +359,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
@@ -345,6 +387,9 @@
"actions",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action connmark",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
index 072febf25f55..56e11136d0f6 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
@@ -6,6 +6,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -30,6 +33,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -54,6 +60,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -78,6 +87,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -102,6 +114,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -126,6 +141,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -150,6 +168,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -174,6 +195,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -198,6 +222,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -222,6 +249,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -246,6 +276,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -270,6 +303,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -294,6 +330,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -318,6 +357,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -342,6 +384,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -366,6 +411,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -390,6 +438,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -414,6 +465,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -438,6 +492,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -461,6 +518,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -485,6 +545,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -508,6 +571,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
@@ -533,6 +599,9 @@
"actions",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action csum",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
index bd843ab00a58..7d07c55bb668 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
@@ -6,6 +6,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -30,6 +33,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -54,6 +60,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -78,6 +87,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -102,6 +114,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -126,6 +141,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -150,6 +168,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -174,6 +195,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -198,6 +222,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -222,6 +249,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -246,6 +276,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -270,6 +303,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -294,6 +330,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -318,6 +357,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -342,6 +384,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -366,6 +411,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
@@ -390,6 +438,9 @@
"actions",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ct",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ctinfo.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ctinfo.json
index d9710c067eb7..bb54d71241a0 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ctinfo.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ctinfo.json
@@ -6,6 +6,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC action flush action ctinfo",
@@ -30,6 +33,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ctinfo",
@@ -54,6 +60,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC action flush action ctinfo",
@@ -78,6 +87,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC action flush action ctinfo",
@@ -102,6 +114,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ctinfo",
@@ -132,6 +147,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ctinfo",
@@ -162,6 +180,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ctinfo",
@@ -192,6 +213,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC action flush action ctinfo",
@@ -219,6 +243,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ctinfo",
@@ -246,6 +273,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ctinfo",
@@ -271,6 +301,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ctinfo",
@@ -295,6 +328,9 @@
"actions",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ctinfo",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
index c652e8c1157d..0fcd52742939 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
@@ -6,6 +6,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -30,6 +33,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -54,6 +60,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -78,6 +87,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -102,6 +114,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -126,6 +141,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -150,6 +168,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -175,6 +196,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -199,6 +223,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -223,6 +250,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -252,6 +282,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC actions add action reclassify index 101",
"$TC actions add action reclassify index 102",
@@ -273,6 +306,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -298,6 +334,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -323,6 +362,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -348,6 +390,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -373,6 +418,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -398,6 +446,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -422,6 +473,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -448,6 +502,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -473,6 +530,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -497,6 +557,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -521,6 +584,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -544,6 +610,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -568,6 +637,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
@@ -593,6 +665,9 @@
"actions",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gact",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gate.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gate.json
index e16a4963fdd2..db645c22ad7b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/gate.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gate.json
@@ -6,6 +6,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC action flush action gate",
@@ -30,6 +33,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gate",
@@ -54,6 +60,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC action flush action gate",
@@ -78,6 +87,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC action flush action gate",
@@ -102,6 +114,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gate",
@@ -132,6 +147,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gate",
@@ -162,6 +180,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gate",
@@ -192,6 +213,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC action flush action gate",
@@ -219,6 +243,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gate",
@@ -246,6 +273,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gate",
@@ -271,6 +301,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gate",
@@ -295,6 +328,9 @@
"actions",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action gate",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index 459bcad35810..f587a32e44c4 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -6,6 +6,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -30,6 +33,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -54,6 +60,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -78,6 +87,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -102,6 +114,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -126,6 +141,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -150,6 +168,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -174,6 +195,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -196,6 +220,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -220,6 +247,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -244,6 +274,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -268,6 +301,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -292,6 +328,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -316,6 +355,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -340,6 +382,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -364,6 +409,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -386,6 +434,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -410,6 +461,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -434,6 +488,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -458,6 +515,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -482,6 +542,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -506,6 +569,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -530,6 +596,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -554,6 +623,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -578,6 +650,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -600,6 +675,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -624,6 +702,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -648,6 +729,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -672,6 +756,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -696,6 +783,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -720,6 +810,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -744,6 +837,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -768,6 +864,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -792,6 +891,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -816,6 +918,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -840,6 +945,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -864,6 +972,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -888,6 +999,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -912,6 +1026,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -934,6 +1051,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -956,6 +1076,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -980,6 +1103,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -1002,6 +1128,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -1024,6 +1153,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -1046,6 +1178,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -1068,6 +1203,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -1093,6 +1231,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
@@ -1118,6 +1259,9 @@
"actions",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action ife",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
index 12a2fe0e1472..b53d12909962 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
@@ -6,6 +6,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -30,6 +33,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -55,6 +61,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -81,6 +90,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -105,6 +117,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -129,6 +144,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -153,6 +171,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -178,6 +199,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -202,6 +226,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -226,6 +253,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -250,6 +280,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -274,6 +307,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -298,6 +334,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -322,6 +361,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -346,6 +388,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -370,6 +415,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -392,6 +440,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -417,6 +468,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -442,6 +496,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -467,6 +524,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -491,6 +551,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -514,6 +577,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -538,6 +604,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
@@ -561,6 +630,9 @@
"actions",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mirred",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json
index 866f0efd0859..b1c5dd27a70d 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json
@@ -6,6 +6,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -30,6 +33,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -54,6 +60,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -78,6 +87,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -102,6 +114,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -126,6 +141,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -150,6 +168,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -174,6 +195,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -198,6 +222,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -222,6 +249,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -244,6 +274,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -266,6 +299,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -288,6 +324,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -310,6 +349,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -332,6 +374,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -356,6 +401,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -380,6 +428,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -404,6 +455,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -426,6 +480,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -448,6 +505,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -470,6 +530,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -492,6 +555,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -514,6 +580,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -538,6 +607,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -562,6 +634,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -586,6 +661,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -610,6 +688,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -634,6 +715,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -656,6 +740,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -678,6 +765,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -700,6 +790,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -722,6 +815,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -744,6 +840,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -768,6 +867,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -792,6 +894,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -814,6 +919,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -836,6 +944,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -860,6 +971,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -884,6 +998,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -906,6 +1023,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -930,6 +1050,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -954,6 +1077,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -978,6 +1104,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -1002,6 +1131,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -1024,6 +1156,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -1046,6 +1181,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -1070,6 +1208,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -1094,6 +1235,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -1116,6 +1260,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -1138,6 +1285,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -1163,6 +1313,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -1188,6 +1341,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
@@ -1211,6 +1367,9 @@
"actions",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action mpls",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
index 0a3c491edbc5..ee2792998c89 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
@@ -6,6 +6,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -30,6 +33,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -54,6 +60,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -78,6 +87,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -102,6 +114,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -126,6 +141,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -150,6 +168,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -174,6 +195,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -203,6 +227,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -232,6 +259,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -261,6 +291,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -285,6 +318,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -309,6 +345,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -333,6 +372,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -357,6 +399,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -381,6 +426,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -405,6 +453,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -429,6 +480,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -453,6 +507,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -477,6 +534,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -501,6 +561,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -525,6 +588,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -549,6 +615,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -573,6 +642,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -597,6 +669,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -622,6 +697,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
@@ -647,6 +725,9 @@
"actions",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action nat",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
index 72cdc3c800a5..37c410332174 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
@@ -6,6 +6,9 @@
"actions",
"pedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -30,6 +33,9 @@
"actions",
"pedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -56,6 +62,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -81,6 +90,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -106,6 +118,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -131,6 +146,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -156,6 +174,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -206,6 +227,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -231,6 +255,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -256,6 +283,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -281,6 +311,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -306,6 +339,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -331,6 +367,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -356,6 +395,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -381,6 +423,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -406,6 +451,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -431,6 +479,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -456,6 +507,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -481,6 +535,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -506,6 +563,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -531,6 +591,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -556,6 +619,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -581,6 +647,9 @@
"pedit",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -606,6 +675,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -631,6 +703,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -656,6 +731,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -681,6 +759,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -706,6 +787,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -731,6 +815,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -756,6 +843,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -779,6 +869,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -804,6 +897,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -829,6 +925,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -854,6 +953,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -879,6 +981,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -904,6 +1009,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -929,6 +1037,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -954,6 +1065,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -979,6 +1093,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1004,6 +1121,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1029,6 +1149,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1054,6 +1177,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1079,6 +1205,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1104,6 +1233,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1129,6 +1261,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1154,6 +1289,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1179,6 +1317,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1204,6 +1345,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1229,6 +1373,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1254,6 +1401,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1279,6 +1429,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1304,6 +1457,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1329,6 +1485,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1354,6 +1513,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1379,6 +1541,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1404,6 +1569,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1429,6 +1597,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1454,6 +1625,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1479,6 +1653,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1504,6 +1681,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1554,6 +1734,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1579,6 +1762,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1629,6 +1815,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1654,6 +1843,9 @@
"pedit",
"layered_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1680,6 +1872,9 @@
"layered_op",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
@@ -1706,6 +1901,9 @@
"layered_op",
"raw_op"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action pedit",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index b7205a069534..dd8109768f8f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -6,6 +6,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -30,6 +33,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -55,6 +61,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -79,6 +88,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -103,6 +115,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -127,6 +142,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -151,6 +169,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -175,6 +196,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -199,6 +223,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -223,6 +250,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -247,6 +277,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -271,6 +304,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -295,6 +331,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -319,6 +358,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -343,6 +385,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -367,6 +412,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -391,6 +439,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -415,6 +466,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -439,6 +493,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -463,6 +520,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -488,6 +548,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -520,6 +583,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -545,6 +611,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -577,6 +646,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC actions add action police rate 1mbit burst 100k index 1",
"$TC actions add action police rate 2mbit burst 200k index 2",
@@ -603,6 +675,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -627,6 +702,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -651,6 +729,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -675,6 +756,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -699,6 +783,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -723,6 +810,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -747,6 +837,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -772,6 +865,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -796,6 +892,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
@@ -820,6 +919,9 @@
"actions",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action police",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
index 148d8bcb8606..af35e2f30a95 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
@@ -6,6 +6,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -30,6 +33,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -54,6 +60,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -78,6 +87,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -102,6 +114,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -126,6 +141,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -150,6 +168,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -174,6 +195,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -196,6 +220,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -218,6 +245,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -240,6 +270,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -262,6 +295,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -284,6 +320,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -308,6 +347,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -332,6 +374,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -356,6 +401,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -380,6 +428,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -402,6 +453,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -424,6 +478,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -446,6 +503,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -468,6 +528,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -492,6 +555,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -516,6 +582,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -541,6 +610,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -566,6 +638,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -591,6 +666,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -616,6 +694,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -641,6 +722,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
@@ -666,6 +750,9 @@
"actions",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action sample",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
index e0c5f060ccb9..ac960e70dc9b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
@@ -6,6 +6,9 @@
"actions",
"simple"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action simple",
@@ -30,6 +33,9 @@
"actions",
"simple"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action simple",
@@ -54,6 +60,9 @@
"actions",
"simple"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action simple",
@@ -79,6 +88,9 @@
"actions",
"simple"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action simple",
@@ -106,6 +118,9 @@
"actions",
"simple"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action simple",
@@ -131,6 +146,9 @@
"actions",
"simple"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action simple",
@@ -158,6 +176,9 @@
"actions",
"simple"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action simple",
@@ -183,6 +204,9 @@
"actions",
"simple"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action simple",
@@ -213,6 +237,9 @@
"actions",
"simple"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action simple",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
index 9cdd2e31ac2c..27ba0f72e904 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -6,6 +6,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -30,6 +33,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -54,6 +60,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -76,6 +85,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -100,6 +112,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -124,6 +139,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -146,6 +164,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -168,6 +189,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -193,6 +217,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -217,6 +244,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -241,6 +271,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -265,6 +298,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -289,6 +325,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -313,6 +352,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -337,6 +379,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -361,6 +406,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -385,6 +433,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -409,6 +460,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -433,6 +487,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -457,6 +514,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -481,6 +541,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -505,6 +568,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -529,6 +595,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -557,6 +626,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -581,6 +653,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -603,6 +678,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -628,6 +706,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC actions add action skbedit mark 500",
"$TC actions add action skbedit mark 501",
@@ -653,6 +734,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -678,6 +762,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
@@ -702,6 +789,9 @@
"actions",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbedit",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
index 742f2290973e..33ed7a8099e2 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
@@ -6,6 +6,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -30,6 +33,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -54,6 +60,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -78,6 +87,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -102,6 +114,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -126,6 +141,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -150,6 +168,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -174,6 +195,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -198,6 +222,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -222,6 +249,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -246,6 +276,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -270,6 +303,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -294,6 +330,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -323,6 +362,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -352,6 +394,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -377,6 +422,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC actions add action skbmod set etype 0x0001",
"$TC actions add action skbmod set etype 0x0011",
@@ -400,6 +448,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
@@ -425,6 +476,9 @@
"actions",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action skbmod",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index b5b47fbf6c00..0b6f0b5aeaad 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -6,6 +6,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -30,6 +33,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -59,6 +65,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -88,6 +97,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -117,6 +129,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -146,6 +161,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -175,6 +193,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -204,6 +225,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -228,6 +252,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -252,6 +279,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -281,6 +311,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -305,6 +338,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -334,6 +370,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -358,6 +397,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -387,6 +429,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -411,6 +456,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -435,6 +483,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -459,6 +510,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -488,6 +542,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -517,6 +574,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -541,6 +601,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -565,6 +628,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -594,6 +660,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -618,6 +687,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -642,6 +714,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -666,6 +741,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -690,6 +768,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -714,6 +795,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -738,6 +822,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -762,6 +849,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -786,6 +876,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -811,6 +904,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -836,6 +932,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -864,6 +963,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -892,6 +994,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -917,6 +1022,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -941,6 +1049,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -966,6 +1077,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action tunnel_key",
@@ -991,6 +1105,9 @@
"actions",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"dependsOn": "$TC actions add action tunnel_key help 2>&1 | grep -q nofrag",
"setup": [
[
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
index 2aad4caa8581..e5fe8762978a 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
@@ -6,6 +6,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -30,6 +33,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -54,6 +60,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -78,6 +87,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -102,6 +114,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -126,6 +141,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -150,6 +168,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -174,6 +195,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -196,6 +220,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -220,6 +247,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -242,6 +272,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -264,6 +297,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -286,6 +322,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -310,6 +349,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -334,6 +376,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -358,6 +403,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -382,6 +430,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -406,6 +457,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -430,6 +484,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -452,6 +509,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -476,6 +536,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -500,6 +563,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -524,6 +590,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -549,6 +618,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -574,6 +646,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -599,6 +674,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -624,6 +702,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -647,6 +728,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -670,6 +754,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -696,6 +783,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -720,6 +810,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -745,6 +838,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -769,6 +865,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -792,6 +891,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -816,6 +918,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
@@ -839,6 +944,9 @@
"actions",
"vlan"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action vlan",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/xt.json b/tools/testing/selftests/tc-testing/tc-tests/actions/xt.json
index c9f002aea6d4..1a92e8898fec 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/xt.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/xt.json
@@ -6,6 +6,9 @@
"actions",
"xt"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action xt",
@@ -30,6 +33,9 @@
"actions",
"xt"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action xt",
@@ -60,6 +66,9 @@
"actions",
"xt"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action xt",
@@ -90,6 +99,9 @@
"actions",
"xt"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action xt",
@@ -120,6 +132,9 @@
"actions",
"xt"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC action flush action xt",
@@ -147,6 +162,9 @@
"actions",
"xt"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action xt",
@@ -174,6 +192,9 @@
"actions",
"xt"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action xt",
@@ -199,6 +220,9 @@
"actions",
"xt"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
[
"$TC actions flush action xt",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json
index 1f0cae474db2..013fb983bc3f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json
@@ -51,7 +51,10 @@
"bpf-filter"
],
"plugins": {
- "requires": "buildebpfPlugin"
+ "requires": [
+ "buildebpfPlugin",
+ "nsPlugin"
+ ]
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
@@ -73,7 +76,10 @@
"bpf-filter"
],
"plugins": {
- "requires": "buildebpfPlugin"
+ "requires": [
+ "buildebpfPlugin",
+ "nsPlugin"
+ ]
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json b/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json
index 5272049566d6..a9b071e1354b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json
@@ -53,111 +53,6 @@
"plugins": {
"requires": "nsPlugin"
},
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
- "plugins": {
- "requires": "nsPlugin"
- },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -173,14 +68,15 @@
{
"id": "c591",
"name": "Add fw filter with action ok by reference",
- "__comment": "We add sleep here because action might have not been deleted by workqueue just yet. Remove this when the behaviour is fixed.",
"category": [
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress",
- "/bin/sleep 1",
"$TC actions add action gact ok index 1"
],
"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action gact index 1",
@@ -189,9 +85,7 @@
"matchPattern": "handle 0x1.*gact action pass.*index 1 ref 2 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DEV1 ingress",
- "/bin/sleep 1",
- "$TC actions del action gact index 1"
+ "$TC qdisc del dev $DEV1 ingress"
]
},
{
@@ -201,6 +95,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -216,14 +113,15 @@
{
"id": "38b3",
"name": "Add fw filter with action continue by reference",
- "__comment": "We add sleep here because action might have not been deleted by workqueue just yet. Remove this when the behaviour is fixed.",
"category": [
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress",
- "/bin/sleep 1",
"$TC actions add action gact continue index 1"
],
"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action gact index 1",
@@ -232,9 +130,7 @@
"matchPattern": "handle 0x1.*gact action continue.*index 1 ref 2 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DEV1 ingress",
- "/bin/sleep 1",
- "$TC actions del action gact index 1"
+ "$TC qdisc del dev $DEV1 ingress"
]
},
{
@@ -244,6 +140,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -259,14 +158,15 @@
{
"id": "6753",
"name": "Add fw filter with action pipe by reference",
- "__comment": "We add sleep here because action might have not been deleted by workqueue just yet.",
"category": [
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress",
- "/bin/sleep 1",
"$TC actions add action gact pipe index 1"
],
"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action gact index 1",
@@ -275,9 +175,7 @@
"matchPattern": "handle 0x1.*gact action pipe.*index 1 ref 2 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DEV1 ingress",
- "/bin/sleep 1",
- "$TC actions del action gact index 1"
+ "$TC qdisc del dev $DEV1 ingress"
]
},
{
@@ -287,6 +185,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -302,14 +203,15 @@
{
"id": "6dc6",
"name": "Add fw filter with action drop by reference",
- "__comment": "We add sleep here because action might have not been deleted by workqueue just yet.",
"category": [
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress",
- "/bin/sleep 1",
"$TC actions add action gact drop index 1"
],
"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action gact index 1",
@@ -318,9 +220,7 @@
"matchPattern": "handle 0x1.*gact action drop.*index 1 ref 2 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DEV1 ingress",
- "/bin/sleep 1",
- "$TC actions del action gact index 1"
+ "$TC qdisc del dev $DEV1 ingress"
]
},
{
@@ -330,6 +230,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -345,14 +248,15 @@
{
"id": "3bc2",
"name": "Add fw filter with action reclassify by reference",
- "__comment": "We add sleep here because action might have not been deleted by workqueue just yet.",
"category": [
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress",
- "/bin/sleep 1",
"$TC actions add action gact reclassify index 1"
],
"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action gact index 1",
@@ -361,9 +265,7 @@
"matchPattern": "handle 0x1.*gact action reclassify.*index 1 ref 2 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DEV1 ingress",
- "/bin/sleep 1",
- "$TC actions del action gact index 1"
+ "$TC qdisc del dev $DEV1 ingress"
]
},
{
@@ -373,6 +275,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -388,14 +293,15 @@
{
"id": "36f7",
"name": "Add fw filter with action jump 10 by reference",
- "__comment": "We add sleep here because action might have not been deleted by workqueue just yet.",
"category": [
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress",
- "/bin/sleep 1",
"$TC actions add action gact jump 10 index 1"
],
"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action gact index 1",
@@ -404,9 +310,7 @@
"matchPattern": "handle 0x1.*gact action jump 10.*index 1 ref 2 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DEV1 ingress",
- "/bin/sleep 1",
- "$TC actions del action gact index 1"
+ "$TC qdisc del dev $DEV1 ingress"
]
},
{
@@ -416,6 +320,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -435,6 +342,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -454,6 +364,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -473,6 +386,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -492,6 +408,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -511,6 +430,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -530,6 +452,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -549,6 +474,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -568,6 +496,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -587,6 +518,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -606,6 +540,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -625,6 +562,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -644,6 +584,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -663,6 +606,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -682,6 +628,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -701,6 +650,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -720,6 +672,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -739,6 +694,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -758,6 +716,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -777,6 +738,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -796,6 +760,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -815,6 +782,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -834,6 +804,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -853,6 +826,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -872,6 +848,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -891,6 +870,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -910,6 +892,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -929,6 +914,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -948,6 +936,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -967,6 +958,9 @@
"filter",
"fw"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@ -1096,7 +1090,6 @@
{
"id": "0e99",
"name": "Del single fw filter x1",
- "__comment__": "First of two tests to check that one filter is there and the other isn't",
"category": [
"filter",
"fw"
@@ -1121,7 +1114,6 @@
{
"id": "f54c",
"name": "Del single fw filter x2",
- "__comment__": "Second of two tests to check that one filter is there and the other isn't",
"category": [
"filter",
"fw"
@@ -1351,5 +1343,54 @@
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
]
+ },
+ {
+ "id": "e470",
+ "name": "Try to delete class referenced by fw after a replace",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 parent root handle 10: drr",
+ "$TC class add dev $DEV1 parent root classid 1 drr",
+ "$TC filter add dev $DEV1 parent 10: handle 1 prio 1 fw classid 10:1 action ok",
+ "$TC filter replace dev $DEV1 parent 10: handle 1 prio 1 fw classid 10:1 action drop"
+ ],
+ "cmdUnderTest": "$TC class delete dev $DEV1 parent 10: classid 10:1",
+ "expExitCode": "2",
+ "verifyCmd": "$TC class show dev $DEV1",
+ "matchPattern": "class drr 10:1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 parent root drr"
+ ]
+ },
+ {
+ "id": "ec1a",
+ "name": "Replace fw classid with nil",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 parent root handle 10: drr",
+ "$TC class add dev $DEV1 parent root classid 1 drr",
+ "$TC filter add dev $DEV1 parent 10: handle 1 prio 1 fw classid 10:1 action ok"
+ ],
+ "cmdUnderTest": "$TC filter replace dev $DEV1 parent 10: handle 1 prio 1 fw action drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent 10:",
+ "matchPattern": "fw chain 0 handle 0x1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 parent root drr"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json b/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json
index 2df68017dfb8..afa1b9b0c856 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json
@@ -6,8 +6,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action ok",
@@ -16,8 +18,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*gact action pass.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -27,8 +28,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY root handle 1: prio"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent 1: handle 0x1 prio 1 protocol ip matchall action ok",
@@ -37,8 +40,7 @@
"matchPattern": "^filter parent 1: protocol ip pref 1 matchall.*handle 0x1.*gact action pass.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY root handle 1: prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY root handle 1: prio"
]
},
{
@@ -48,8 +50,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ipv6 matchall action drop",
@@ -58,8 +62,7 @@
"matchPattern": "^filter parent ffff: protocol ipv6 pref 1 matchall.*handle 0x1.*gact action drop.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -69,8 +72,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY root handle 1: prio"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent 1: handle 0x1 prio 1 protocol ipv6 matchall action drop",
@@ -79,8 +84,7 @@
"matchPattern": "^filter parent 1: protocol ipv6 pref 1 matchall.*handle 0x1.*gact action drop.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY root handle 1: prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY root handle 1: prio"
]
},
{
@@ -90,8 +94,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 65535 protocol ipv4 matchall action pass",
@@ -100,8 +106,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 65535 matchall.*handle 0x1.*gact action pass.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -111,8 +116,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY root handle 1: prio"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent 1: handle 0x1 prio 65535 protocol ipv4 matchall action pass",
@@ -121,8 +128,7 @@
"matchPattern": "^filter parent 1: protocol ip pref 65535 matchall.*handle 0x1.*gact action pass.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY root handle 1: prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY root handle 1: prio"
]
},
{
@@ -132,8 +138,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 655355 protocol ipv4 matchall action pass",
@@ -142,8 +150,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 655355 matchall.*handle 0x1.*gact action pass.*ref 1 bind 1",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -153,8 +160,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY root handle 1: prio"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent 1: handle 0x1 prio 655355 protocol ipv4 matchall action pass",
@@ -163,8 +172,7 @@
"matchPattern": "^filter parent 1: protocol ip pref 655355 matchall.*handle 0x1.*gact action pass.*ref 1 bind 1",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY root handle 1: prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY root handle 1: prio"
]
},
{
@@ -174,8 +182,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0xffffffff prio 1 protocol all matchall action continue",
@@ -184,8 +194,7 @@
"matchPattern": "^filter parent ffff: protocol all pref 1 matchall.*handle 0xffffffff.*gact action continue.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -195,8 +204,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY root handle 1: prio"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent 1: handle 0xffffffff prio 1 protocol all matchall action continue",
@@ -205,8 +216,7 @@
"matchPattern": "^filter parent 1: protocol all pref 1 matchall.*handle 0xffffffff.*gact action continue.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY root handle 1: prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY root handle 1: prio"
]
},
{
@@ -216,8 +226,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol all matchall skip_hw action reclassify",
@@ -226,8 +238,7 @@
"matchPattern": "^filter parent ffff: protocol all pref 1 matchall.*handle 0x1.*skip_hw.*not_in_hw.*gact action reclassify.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -237,8 +248,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY root handle 1: prio"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent 1: handle 0x1 prio 1 protocol all matchall skip_hw action reclassify",
@@ -247,8 +260,7 @@
"matchPattern": "^filter parent 1: protocol all pref 1 matchall.*handle 0x1.*skip_hw.*not_in_hw.*gact action reclassify.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY root handle 1: prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY root handle 1: prio"
]
},
{
@@ -258,8 +270,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ipv6 matchall classid 1:1 action pass",
@@ -268,8 +282,7 @@
"matchPattern": "^filter parent ffff: protocol ipv6 pref 1 matchall.*handle 0x1.*flowid 1:1.*gact action pass.*ref 1 bind 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -279,8 +292,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress"
],
"cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ipv6 matchall classid 6789defg action pass",
@@ -289,8 +304,7 @@
"matchPattern": "^filter protocol ipv6 pref 1 matchall.*handle 0x1.*flowid 6789defg.*gact action pass.*ref 1 bind 1",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -300,8 +314,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ipv6 matchall classid 1:2 action pass"
],
@@ -311,8 +327,7 @@
"matchPattern": "^filter protocol ipv6 pref 1 matchall.*handle 0x1.*flowid 1:2.*gact action pass.*ref 1 bind 1",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -322,8 +337,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol all matchall classid 1:2 action pass",
"$TC filter add dev $DUMMY parent ffff: handle 0x2 prio 2 protocol all matchall classid 1:3 action pass",
@@ -336,8 +353,7 @@
"matchPattern": "^filter protocol all pref.*matchall.*handle.*flowid.*gact action pass",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -347,8 +363,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol all matchall classid 1:2 action pass",
"$TC filter add dev $DUMMY parent ffff: handle 0x2 prio 2 protocol all matchall classid 1:3 action pass",
@@ -361,8 +379,7 @@
"matchPattern": "^filter protocol all pref 2 matchall.*handle 0x2 flowid 1:2.*gact action pass",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -372,8 +389,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol all chain 1 matchall classid 1:1 action pass",
"$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ipv4 chain 2 matchall classid 1:3 action continue"
@@ -384,8 +403,7 @@
"matchPattern": "^filter protocol all pref 1 matchall chain 1 handle 0x1 flowid 1:1.*gact action pass",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -395,8 +413,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions flush action police",
"$TC actions add action police rate 1mbit burst 100k index 199 skip_hw"
@@ -408,7 +428,6 @@
"matchCount": "0",
"teardown": [
"$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
"$TC actions del action police index 199"
]
},
@@ -419,8 +438,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions flush action police",
"$TC actions add action police rate 1mbit burst 100k index 199"
@@ -432,7 +453,6 @@
"matchCount": "0",
"teardown": [
"$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
"$TC actions del action police index 199"
]
},
@@ -443,8 +463,10 @@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions flush action police",
"$TC actions add action police rate 1mbit burst 100k index 199"
@@ -456,7 +478,6 @@
"matchCount": "0",
"teardown": [
"$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
"$TC actions del action police index 199"
]
}
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/route.json b/tools/testing/selftests/tc-testing/tc-tests/filters/route.json
index 1f6f19f02997..8d8de8f65aef 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/route.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/route.json
@@ -177,5 +177,30 @@
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
]
+ },
+ {
+ "id": "b042",
+ "name": "Try to delete class referenced by route after a replace",
+ "category": [
+ "filter",
+ "route"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 parent root handle 10: drr",
+ "$TC class add dev $DEV1 parent root classid 1 drr",
+ "$TC filter add dev $DEV1 parent 10: prio 1 route from 10 classid 10:1 action ok",
+ "$TC filter replace dev $DEV1 parent 10: prio 1 route from 5 classid 10:1 action drop"
+ ],
+ "cmdUnderTest": "$TC class delete dev $DEV1 parent 10: classid 10:1",
+ "expExitCode": "2",
+ "verifyCmd": "$TC class show dev $DEV1",
+ "matchPattern": "class drr 10:1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 parent root drr"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json b/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json
index bd64a4bf11ab..ddc7c355be0a 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json
@@ -247,5 +247,30 @@
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
]
+ },
+ {
+ "id": "0c37",
+ "name": "Try to delete class referenced by u32 after a replace",
+ "category": [
+ "filter",
+ "u32"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 parent root handle 10: drr",
+ "$TC class add dev $DEV1 parent root classid 1 drr",
+ "$TC filter add dev $DEV1 parent 10: prio 1 u32 match icmp type 1 0xff classid 10:1 action ok",
+ "$TC filter replace dev $DEV1 parent 10: prio 1 u32 match icmp type 1 0xff classid 10:1 action drop"
+ ],
+ "cmdUnderTest": "$TC class delete dev $DEV1 parent 10: classid 10:1",
+ "expExitCode": "2",
+ "verifyCmd": "$TC class show dev $DEV1",
+ "matchPattern": "class drr 10:1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 parent root drr"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json b/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json
index 16f3a83605e4..1ba96c467754 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json
@@ -6,8 +6,10 @@
"infra",
"pedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC action add action pedit munge offset 0 u8 clear index 1"
],
@@ -17,9 +19,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action pedit"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -29,8 +29,10 @@
"infra",
"mpls"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC action add action mpls pop protocol ipv4 index 1"
],
@@ -40,9 +42,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action mpls"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -52,8 +52,10 @@
"infra",
"bpf"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' index 1"
],
@@ -63,9 +65,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action bpf"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -75,8 +75,10 @@
"infra",
"connmark"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action connmark"
],
@@ -86,9 +88,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action connmark"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -98,8 +98,10 @@
"infra",
"csum"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action csum ip4h index 1"
],
@@ -109,9 +111,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action csum"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -121,8 +121,10 @@
"infra",
"ct"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action ct index 1"
],
@@ -132,9 +134,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action ct"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -144,8 +144,10 @@
"infra",
"ctinfo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC action add action ctinfo index 1"
],
@@ -155,9 +157,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action ctinfo"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -167,8 +167,10 @@
"infra",
"gact"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action pass index 1"
],
@@ -178,9 +180,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action gact"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -190,8 +190,10 @@
"infra",
"gate"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC action add action gate priority 1 sched-entry close 100000000ns index 1"
],
@@ -201,9 +203,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action gate"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -213,8 +213,10 @@
"infra",
"ife"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action ife encode allow mark pass index 1"
],
@@ -224,9 +226,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action ife"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -236,8 +236,10 @@
"infra",
"mirred"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action mirred egress mirror index 1 dev lo"
],
@@ -247,9 +249,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action mirred"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -259,8 +259,10 @@
"infra",
"nat"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action nat ingress 192.168.1.1 200.200.200.1"
],
@@ -270,9 +272,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action nat"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -282,8 +282,10 @@
"infra",
"police"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action police rate 1kbit burst 10k index 1"
],
@@ -293,9 +295,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action police"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -305,8 +305,10 @@
"infra",
"sample"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action sample rate 10 group 1 index 1"
],
@@ -316,9 +318,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action sample"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -328,8 +328,10 @@
"infra",
"skbedit"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action skbedit mark 1"
],
@@ -339,9 +341,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action skbedit"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -351,8 +351,10 @@
"infra",
"skbmod"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action skbmod set dmac 11:22:33:44:55:66 index 1"
],
@@ -362,9 +364,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action skbmod"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -374,8 +374,10 @@
"infra",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1 index 1"
],
@@ -385,9 +387,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action tunnel_key"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -397,8 +397,10 @@
"infra",
"tunnel_key"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC actions add action vlan pop pipe index 1"
],
@@ -408,9 +410,7 @@
"matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy",
- "$TC actions flush action vlan"
+ "$TC qdisc del dev $DUMMY ingress"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json b/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json
index c4c778e83da2..8d10042b489b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/infra/filter.json
@@ -1,13 +1,15 @@
[
{
"id": "c2b4",
- "name": "soft lockup alarm will be not generated after delete the prio 0 filter of the chain",
+ "name": "Soft lockup alarm will be not generated after delete the prio 0 filter of the chain",
"category": [
"filter",
"chain"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY root handle 1: htb default 1",
"$TC chain add dev $DUMMY",
"$TC filter del dev $DUMMY chain 0 parent 1: prio 0"
@@ -18,8 +20,7 @@
"matchPattern": "chain parent 1: chain 0",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY root handle 1: htb default 1",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY root handle 1: htb default 1"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cake.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cake.json
index 1134b72d281d..c4c5f7ba0e0f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cake.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cake.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake bandwidth 1000",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth 1Kbit diffserv3 triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake autorate-ingress",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited autorate-ingress diffserv3 triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake rtt 200",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 triple-isolate nonat nowash no-ack-filter split-gso rtt 200us raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake besteffort",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited besteffort triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake diffserv8",
"expExitCode": "0",
@@ -133,8 +122,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv8 triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake diffserv4",
"expExitCode": "0",
@@ -156,8 +143,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv4 triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -171,7 +157,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake flowblind",
"expExitCode": "0",
@@ -179,8 +164,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 flowblind nonat nowash no-ack-filter split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -194,7 +178,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake dsthost nat",
"expExitCode": "0",
@@ -202,8 +185,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 dsthost nat nowash no-ack-filter split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -217,7 +199,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake hosts wash",
"expExitCode": "0",
@@ -225,8 +206,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 hosts nonat wash no-ack-filter split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -240,7 +220,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake flowblind no-split-gso",
"expExitCode": "0",
@@ -248,8 +227,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 flowblind nonat nowash no-ack-filter no-split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -263,7 +241,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake dual-srchost ack-filter",
"expExitCode": "0",
@@ -271,8 +248,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 dual-srchost nonat nowash ack-filter split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -286,7 +262,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake dual-dsthost ack-filter-aggressive",
"expExitCode": "0",
@@ -294,8 +269,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 dual-dsthost nonat nowash ack-filter-aggressive split-gso rtt 100ms raw overhead",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -309,7 +283,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake memlimit 10000 ptm",
"expExitCode": "0",
@@ -317,8 +290,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms raw ptm overhead 0 memlimit 10000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -332,7 +304,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake fwmark 8 atm",
"expExitCode": "0",
@@ -340,8 +311,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms raw atm overhead 0 fwmark 0x8",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -355,7 +325,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake overhead 128 mpu 256",
"expExitCode": "0",
@@ -363,8 +332,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms noatm overhead 128 mpu 256",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -378,7 +346,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake conservative ingress",
"expExitCode": "0",
@@ -386,8 +353,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 triple-isolate nonat nowash ingress no-ack-filter split-gso rtt 100ms atm overhead 48",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -401,7 +367,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root cake conservative ingress"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -410,7 +375,6 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 triple-isolate nonat nowash ingress no-ack-filter split-gso rtt 100ms atm overhead 48",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -424,7 +388,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root cake overhead 128 mpu 256"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root cake mpu 128",
@@ -433,8 +396,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms noatm overhead 128 mpu 128",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -448,7 +410,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root cake overhead 128 mpu 256"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root cake mpu 128",
@@ -457,8 +418,7 @@
"matchPattern": "qdisc cake 1: root refcnt [0-9]+ bandwidth unlimited diffserv3 triple-isolate nonat nowash no-ack-filter split-gso rtt 100ms noatm overhead 128 mpu 128",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -472,7 +432,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cake",
"expExitCode": "0",
@@ -480,8 +439,7 @@
"matchPattern": "class cake",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
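
With nsPlugin required, $DUMMY no longer has to exist on the host before a run: the plugin creates it fresh for each test. Assuming the usual tdc workflow (run from tools/testing/selftests/tc-testing; flag spellings may differ slightly between trees), a converted file can be exercised directly:

    # run every cake test, each in its own namespace-backed environment
    ./tdc.py -f tc-tests/qdiscs/cake.json

    # run a single case by id ("1212" is a hypothetical id here)
    ./tdc.py -f tc-tests/qdiscs/cake.json -e 1212

Because every case gets a private namespace, a test that dies mid-run can no longer leak a stale dummy device into the next one, which is what the "|| /bin/true" guard used to paper over.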
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbs.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbs.json
index a46bf5ff8277..33ea986176d9 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbs.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbs.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbs",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc cbs 1: root refcnt [0-9]+ hicredit 0 locredit 0 sendslope 0 idleslope 0 offload 0.*qdisc pfifo 0: parent 1: limit 1000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbs hicredit 64",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc cbs 1: root refcnt [0-9]+ hicredit 64 locredit 0 sendslope 0 idleslope 0 offload 0.*qdisc pfifo 0: parent 1: limit 1000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbs locredit 10",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc cbs 1: root refcnt [0-9]+ hicredit 0 locredit 10 sendslope 0 idleslope 0 offload 0.*qdisc pfifo 0: parent 1: limit 1000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbs sendslope 888",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc cbs 1: root refcnt [0-9]+ hicredit 0 locredit 0 sendslope 888 idleslope 0 offload 0.*qdisc pfifo 0: parent 1: limit 1000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbs idleslope 666",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc cbs 1: root refcnt [0-9]+ hicredit 0 locredit 0 sendslope 0 idleslope 666 offload 0.*qdisc pfifo 0: parent 1: limit 1000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbs hicredit 10 locredit 75 sendslope 2 idleslope 666",
"expExitCode": "0",
@@ -133,8 +122,7 @@
"matchPattern": "qdisc cbs 1: root refcnt [0-9]+ hicredit 10 locredit 75 sendslope 2 idleslope 666 offload 0.*qdisc pfifo 0: parent 1: limit 1000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root cbs idleslope 666"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root cbs sendslope 10",
@@ -157,8 +144,7 @@
"matchPattern": "qdisc cbs 1: root refcnt [0-9]+ hicredit 0 locredit 0 sendslope 10 idleslope 0 offload 0.*qdisc pfifo 0: parent 1: limit 1000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -172,7 +158,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root cbs idleslope 666"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root cbs idleslope 1",
@@ -181,8 +166,7 @@
"matchPattern": "qdisc cbs 1: root refcnt [0-9]+ hicredit 0 locredit 0 sendslope 0 idleslope 1 offload 0.*qdisc pfifo 0: parent 1: limit 1000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -196,7 +180,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root cbs idleslope 666"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -205,7 +188,6 @@
"matchPattern": "qdisc cbs 1: root refcnt [0-9]+ hicredit 0 locredit 0 sendslope 0 idleslope 1 offload 0.*qdisc pfifo 0: parent 1: limit 1000p",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -219,7 +201,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbs",
"expExitCode": "0",
@@ -227,8 +208,7 @@
"matchPattern": "class cbs 1:[0-9]+ parent 1:",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/choke.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/choke.json
index 31b7775d25fc..d46e5e2c9430 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/choke.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/choke.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root choke limit 1000 bandwidth 10000",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc choke 1: root refcnt [0-9]+ limit 1000p min 83p max 250p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root choke limit 1000 bandwidth 10000 min 100",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc choke 1: root refcnt [0-9]+ limit 1000p min 100p max 250p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root choke limit 1000 bandwidth 10000 max 900",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc choke 1: root refcnt [0-9]+ limit 1000p min.*max 900p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root choke limit 1000 bandwidth 10000 ecn",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc choke 1: root refcnt [0-9]+ limit 1000p min 83p max 250p ecn",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root choke limit 1000 bandwidth 10000 burst 100",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc choke 1: root refcnt [0-9]+ limit 1000p min 83p max 250p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root choke limit 1000 bandwidth 10000"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -134,7 +123,6 @@
"matchPattern": "qdisc choke 1: root refcnt [0-9]+ limit 1000p min 83p max 250p",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root choke limit 1000 bandwidth 10000"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root choke limit 1000 bandwidth 10000 min 100",
@@ -157,8 +144,7 @@
"matchPattern": "qdisc choke 1: root refcnt [0-9]+ limit 1000p min 100p max 250p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -172,7 +158,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root choke limit 1000 bandwidth 10000"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root choke limit 1000 bandwidth 10000 min 100",
@@ -181,8 +166,7 @@
"matchPattern": "qdisc choke 1: root refcnt [0-9]+ limit 1000p min 100p max 250p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/codel.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/codel.json
index ea38099d48e5..e9469ee71e6f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/codel.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/codel.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root codel",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc codel 1: root refcnt [0-9]+ limit 1000p target 5ms interval 100ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root codel limit 1500",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc codel 1: root refcnt [0-9]+ limit 1500p target 5ms interval 100ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root codel target 100ms",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc codel 1: root refcnt [0-9]+ limit 1000p target 100ms interval 100ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root codel interval 20ms",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc codel 1: root refcnt [0-9]+ limit 1000p target 5ms interval 20ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root codel ecn",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc codel 1: root refcnt [0-9]+ limit 1000p target 5ms interval 100ms ecn",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root codel ce_threshold 20ms",
"expExitCode": "0",
@@ -133,8 +122,7 @@
"matchPattern": "qdisc codel 1: root refcnt [0-9]+ limit 1000p target 5ms ce_threshold 20ms interval 100ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root codel"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -157,7 +144,6 @@
"matchPattern": "qdisc codel 1: root refcnt [0-9]+ limit 1000p target 5ms interval 100ms",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -171,7 +157,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root codel"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root codel limit 5000",
@@ -180,8 +165,7 @@
"matchPattern": "qdisc codel 1: root refcnt [0-9]+ limit 5000p target 5ms interval 100ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -195,7 +179,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root codel"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root codel limit 100",
@@ -204,8 +187,7 @@
"matchPattern": "qdisc codel 1: root refcnt [0-9]+ limit 100p target 5ms interval 100ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json
index 486a425b3c1c..7126ec3485cb 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root drr",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc drr 1: root refcnt [0-9]+",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root drr"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -42,7 +39,6 @@
"matchPattern": "qdisc drr 1: root refcnt [0-9]+",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root drr",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "class drr 1:",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/etf.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/etf.json
index 0046d44bcd93..2c73ee47bf58 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/etf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/etf.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root etf clockid CLOCK_TAI",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc etf 1: root refcnt [0-9]+ clockid TAI delta 0 offload off deadline_mode off skip_sock_check off",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root etf delta 100 clockid CLOCK_TAI",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc etf 1: root refcnt [0-9]+ clockid TAI delta 100 offload off deadline_mode off skip_sock_check off",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root etf clockid CLOCK_TAI deadline_mode",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc etf 1: root refcnt [0-9]+ clockid TAI delta 0 offload off deadline_mode on skip_sock_check off",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root etf clockid CLOCK_TAI skip_sock_check",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc etf 1: root refcnt [0-9]+ clockid TAI delta 0 offload off deadline_mode off skip_sock_check on",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root etf clockid CLOCK_TAI"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -111,7 +102,6 @@
"matchPattern": "qdisc etf 1: root refcnt [0-9]+ clockid TAI delta 0 offload off deadline_mode off skip_sock_check off",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/ets.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/ets.json
index 180593010675..a5d94cdec605 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/ets.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/ets.json
@@ -6,8 +6,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 2",
"expExitCode": "0",
@@ -15,8 +17,7 @@
"matchPattern": "qdisc ets 1: root .* bands 2",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -26,8 +27,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta 1000 900 800 700",
"expExitCode": "0",
@@ -35,8 +38,7 @@
"matchPattern": "qdisc ets 1: root .*bands 4 quanta 1000 900 800 700",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -46,8 +48,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 3",
"expExitCode": "0",
@@ -55,8 +59,7 @@
"matchPattern": "qdisc ets 1: root .*bands 3 strict 3",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -66,8 +69,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 4 quanta 1000 900 800 700",
"expExitCode": "0",
@@ -75,8 +80,7 @@
"matchPattern": "qdisc ets 1: root .*bands 4 quanta 1000 900 800 700 priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -86,8 +90,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 3 strict 3",
"expExitCode": "0",
@@ -95,8 +101,7 @@
"matchPattern": "qdisc ets 1: root .*bands 3 strict 3 priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -106,8 +111,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 3 quanta 1500 750",
"expExitCode": "0",
@@ -115,8 +122,7 @@
"matchPattern": "qdisc ets 1: root .*bands 5 strict 3 quanta 1500 750 priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -126,8 +132,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 0 quanta 1500 750",
"expExitCode": "0",
@@ -135,8 +143,7 @@
"matchPattern": "qdisc ets 1: root .*bands 2 quanta 1500 750 priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -146,8 +153,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 5 strict 3 quanta 1500 750",
"expExitCode": "0",
@@ -155,8 +164,7 @@
"matchPattern": "qdisc ets 1: root .*bands 5 .*strict 3 quanta 1500 750 priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -166,8 +174,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 2 quanta 1000",
"expExitCode": "0",
@@ -175,8 +185,7 @@
"matchPattern": "qdisc ets 1: root .*bands 2 .*quanta 1000 [1-9][0-9]* priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -186,8 +195,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 3 strict 1",
"expExitCode": "0",
@@ -195,8 +206,7 @@
"matchPattern": "qdisc ets 1: root .*bands 3 strict 1 quanta ([1-9][0-9]* ){2}priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -206,8 +216,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 3 strict 1 quanta 1000",
"expExitCode": "0",
@@ -215,8 +227,7 @@
"matchPattern": "qdisc ets 1: root .*bands 3 strict 1 quanta 1000 [1-9][0-9]* priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -226,8 +237,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 16",
"expExitCode": "0",
@@ -235,8 +248,7 @@
"matchPattern": "qdisc ets 1: root .* bands 16",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -246,8 +258,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 17",
"expExitCode": "1",
@@ -255,7 +269,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -265,8 +278,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 17",
"expExitCode": "1",
@@ -274,7 +289,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -284,8 +298,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16",
"expExitCode": "0",
@@ -293,8 +309,7 @@
"matchPattern": "qdisc ets 1: root .* bands 16",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -304,8 +319,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17",
"expExitCode": "2",
@@ -313,7 +330,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -323,8 +339,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 8 quanta 1 2 3 4 5 6 7 8",
"expExitCode": "0",
@@ -332,8 +350,7 @@
"matchPattern": "qdisc ets 1: root .* bands 16",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -343,8 +360,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 9 quanta 1 2 3 4 5 6 7 8",
"expExitCode": "2",
@@ -352,7 +371,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -362,8 +380,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 5 priomap 0 0 1 0 1 2 0 1 2 3 0 1 2 3 4 0",
"expExitCode": "0",
@@ -371,8 +391,7 @@
"matchPattern": "qdisc ets 1: root .*priomap 0 0 1 0 1 2 0 1 2 3 0 1 2 3 4 0",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -382,8 +401,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta 1000 2000 3000 4000 5000 priomap 0 0 1 0 1 2 0 1 2 3 0 1 2 3 4 0",
"expExitCode": "0",
@@ -391,8 +412,7 @@
"matchPattern": "qdisc ets 1: root .*quanta 1000 2000 3000 4000 5000 priomap 0 0 1 0 1 2 0 1 2 3 0 1 2 3 4 0",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -402,8 +422,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 5 priomap 0 0 1 0 1 2 0 1 2 3 0 1 2 3 4 0",
"expExitCode": "0",
@@ -411,8 +433,7 @@
"matchPattern": "qdisc ets 1: root .*bands 5 strict 5 priomap 0 0 1 0 1 2 0 1 2 3 0 1 2 3 4 0",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -422,8 +443,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 2 quanta 1000 2000 3000 priomap 0 0 1 0 1 2 0 1 2 3 0 1 2 3 4 0",
"expExitCode": "0",
@@ -431,8 +454,7 @@
"matchPattern": "qdisc ets 1: root .*strict 2 quanta 1000 2000 3000 priomap 0 0 1 0 1 2 0 1 2 3 0 1 2 3 4 0",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -442,8 +464,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta 4000 3000 2000",
"expExitCode": "0",
@@ -451,8 +475,7 @@
"matchPattern": "class ets 1:1 root quantum 4000",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -462,8 +485,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta 4000 3000 2000",
"expExitCode": "0",
@@ -471,8 +496,7 @@
"matchPattern": "class ets 1:2 root quantum 3000",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -482,8 +506,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta 4000 3000 2000",
"expExitCode": "0",
@@ -491,8 +517,7 @@
"matchPattern": "class ets 1:3 root quantum 2000",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -502,8 +527,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 3",
"expExitCode": "0",
@@ -511,8 +538,7 @@
"matchPattern": "class ets 1:1 root $",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -522,8 +548,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 2 quanta 1000 2000 3000",
"expExitCode": "1",
@@ -531,7 +559,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -541,8 +568,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 2 strict 3",
"expExitCode": "1",
@@ -550,7 +579,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -560,8 +588,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 4 strict 2 quanta 1000 2000 3000",
"expExitCode": "1",
@@ -569,7 +599,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -579,8 +608,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 5 priomap 0 0 1 0 1 2 0 1 2 3 0 1 2 3 4 0 1 2",
"expExitCode": "1",
@@ -588,7 +619,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -598,8 +628,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 2 priomap 0 1 2",
"expExitCode": "1",
@@ -607,7 +639,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -617,8 +648,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta 1000 500 priomap 0 1 2",
"expExitCode": "1",
@@ -626,7 +659,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -636,8 +668,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 2 priomap 0 1 2",
"expExitCode": "1",
@@ -645,7 +679,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -655,8 +688,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets strict 1 quanta 1000 500 priomap 0 1 2 3",
"expExitCode": "1",
@@ -664,7 +699,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -674,8 +708,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 4 strict 1 quanta 1000 500 priomap 0 1 2 3",
"expExitCode": "0",
@@ -683,7 +719,6 @@
"matchPattern": "qdisc ets",
"matchCount": "1",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -693,8 +728,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 4 strict 1 quanta 1000 500 priomap 0 1 2 3 4",
"expExitCode": "1",
@@ -702,7 +739,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -712,8 +748,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 4 priomap 0 0 0 0",
"expExitCode": "0",
@@ -721,7 +759,6 @@
"matchPattern": "qdisc ets .*priomap 0 0 0 0 3 3 3 3 3 3 3 3 3 3 3 3",
"matchCount": "1",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -731,8 +768,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 4",
"expExitCode": "0",
@@ -740,7 +779,6 @@
"matchPattern": "qdisc ets .*priomap 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3",
"matchCount": "1",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -750,8 +788,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 0",
"expExitCode": "1",
@@ -759,7 +799,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -769,8 +808,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets bands 17",
"expExitCode": "1",
@@ -778,7 +819,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -788,8 +828,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets",
"expExitCode": "1",
@@ -797,7 +839,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -807,8 +848,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta 1000 0 800 700",
"expExitCode": "1",
@@ -816,7 +859,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -826,8 +868,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta 0",
"expExitCode": "1",
@@ -835,7 +879,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -845,8 +888,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root ets quanta",
"expExitCode": "255",
@@ -854,7 +899,6 @@
"matchPattern": "qdisc ets",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -864,8 +908,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root ets quanta 1000 2000 3000"
],
"cmdUnderTest": "$TC class change dev $DUMMY classid 1:1 ets quantum 1500",
@@ -874,7 +920,6 @@
"matchPattern": "qdisc ets 1: root .*quanta 1500 2000 3000 priomap ",
"matchCount": "1",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -884,8 +929,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root ets quanta 1000 2000 3000"
],
"cmdUnderTest": "$TC class change dev $DUMMY classid 1:1 ets",
@@ -894,7 +941,6 @@
"matchPattern": "qdisc ets 1: root .*quanta 1000 2000 3000 priomap ",
"matchCount": "1",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -904,8 +950,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root ets strict 5"
],
"cmdUnderTest": "$TC class change dev $DUMMY classid 1:2 ets quantum 1500",
@@ -914,7 +962,6 @@
"matchPattern": "qdisc ets .*bands 5 .*strict 5",
"matchCount": "1",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -924,8 +971,10 @@
"qdisc",
"ets"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root ets strict 5"
],
"cmdUnderTest": "$TC class change dev $DUMMY classid 1:2 ets",
@@ -934,7 +983,6 @@
"matchPattern": "qdisc ets .*bands 5 .*strict 5",
"matchCount": "1",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
}
]
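
Note what happens to the negative tests (expExitCode 1, 2 or 255) throughout ets.json: their teardown arrays end up empty. Previously, even a test whose qdisc was never installed still had to delete the dummy link; now the namespace, device included, is discarded wholesale after each case, so an entry can legitimately end with:

    "matchCount": "0",
    "teardown": [
    ]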
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fifo.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fifo.json
index 5ecd93b4c473..ae3d286a32b2 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fifo.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fifo.json
@@ -2,13 +2,14 @@
{
"id": "a519",
"name": "Add bfifo qdisc with system default parameters on egress",
- "__comment": "When omitted, queue size in bfifo is calculated as: txqueuelen * (MTU + LinkLayerHdrSize), where LinkLayerHdrSize=14 for Ethernet",
"category": [
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root bfifo",
"expExitCode": "0",
@@ -16,20 +17,20 @@
"matchPattern": "qdisc bfifo 1: root.*limit [0-9]+b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root bfifo",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root bfifo"
]
},
{
"id": "585c",
"name": "Add pfifo qdisc with system default parameters on egress",
- "__comment": "When omitted, queue size in pfifo is defaulted to the interface's txqueuelen value.",
"category": [
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root pfifo",
"expExitCode": "0",
@@ -37,8 +38,7 @@
"matchPattern": "qdisc pfifo 1: root.*limit [0-9]+p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root pfifo",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root pfifo"
]
},
{
@@ -48,8 +48,10 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY root handle ffff: bfifo",
"expExitCode": "0",
@@ -57,8 +59,7 @@
"matchPattern": "qdisc bfifo ffff: root.*limit [0-9]+b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle ffff: root bfifo",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle ffff: root bfifo"
]
},
{
@@ -68,8 +69,10 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root bfifo limit 3000b",
"expExitCode": "0",
@@ -77,8 +80,7 @@
"matchPattern": "qdisc bfifo 1: root.*limit 3000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root bfifo",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root bfifo"
]
},
{
@@ -88,8 +90,11 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY txqueuelen 3000 type dummy || /bin/true"
+ "$IP link set dev $DUMMY txqueuelen 3000"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root pfifo limit 3000",
"expExitCode": "0",
@@ -97,8 +102,7 @@
"matchPattern": "qdisc pfifo 1: root.*limit 3000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root pfifo",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root pfifo"
]
},
{
@@ -108,8 +112,10 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY root handle 10000: bfifo",
"expExitCode": "255",
@@ -117,7 +123,6 @@
"matchPattern": "qdisc bfifo 10000: root.*limit [0-9]+b",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -127,8 +132,10 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root bfifo foorbar",
"expExitCode": "1",
@@ -136,7 +143,6 @@
"matchPattern": "qdisc bfifo 1: root",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -146,8 +152,10 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root pfifo foorbar",
"expExitCode": "1",
@@ -155,7 +163,6 @@
"matchPattern": "qdisc pfifo 1: root",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -165,9 +172,11 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link del dev $DUMMY type dummy || /bin/true",
- "$IP link add dev $DUMMY txqueuelen 1000 type dummy",
+ "$IP link set dev $DUMMY txqueuelen 1000",
"$TC qdisc add dev $DUMMY handle 1: root bfifo"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root bfifo limit 3000b",
@@ -176,8 +185,7 @@
"matchPattern": "qdisc bfifo 1: root.*limit 3000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root bfifo",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root bfifo"
]
},
{
@@ -187,9 +195,11 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link del dev $DUMMY type dummy || /bin/true",
- "$IP link add dev $DUMMY txqueuelen 1000 type dummy",
+ "$IP link set dev $DUMMY txqueuelen 1000",
"$TC qdisc add dev $DUMMY handle 1: root pfifo"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root pfifo limit 30",
@@ -198,8 +208,7 @@
"matchPattern": "qdisc pfifo 1: root.*limit 30p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root pfifo",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root pfifo"
]
},
{
@@ -209,8 +218,10 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root bfifo limit foo-bar",
"expExitCode": "1",
@@ -218,7 +229,6 @@
"matchPattern": "qdisc bfifo 1: root.*limit foo-bar",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -228,8 +238,10 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root bfifo"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root bfifo",
@@ -238,8 +250,7 @@
"matchPattern": "qdisc bfifo 1: root",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root bfifo",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root bfifo"
]
},
{
@@ -249,8 +260,10 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY root handle 1: bfifo",
"expExitCode": "2",
@@ -258,7 +271,6 @@
"matchPattern": "qdisc bfifo 1: root",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -268,8 +280,10 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY root handle 123^ bfifo limit 100b",
"expExitCode": "255",
@@ -277,7 +291,6 @@
"matchPattern": "qdisc bfifo 123 root",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -287,8 +300,10 @@
"qdisc",
"fifo"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY root handle 1: bfifo",
"$TC qdisc del dev $DUMMY root handle 1: bfifo"
],
@@ -298,7 +313,6 @@
"matchPattern": "qdisc bfifo 1: root",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
}
]
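Applied, a converted fifo case reads as sketched below — assembled from the hunks above, with an illustrative placeholder id ("xxxx"); the "name" and "verifyCmd" values are likewise assumed, since those lines fall outside the hunk context. The per-test dummy add/del is gone, and the namespace plugin is requested instead:

    {
        "id": "xxxx",
        "name": "Add bfifo qdisc on $DUMMY with default setting",
        "category": [
            "qdisc",
            "fifo"
        ],
        "plugins": {
            "requires": "nsPlugin"
        },
        "setup": [
        ],
        "cmdUnderTest": "$TC qdisc add dev $DUMMY root handle ffff: bfifo",
        "expExitCode": "0",
        "verifyCmd": "$TC qdisc show dev $DUMMY",
        "matchPattern": "qdisc bfifo ffff: root.*limit [0-9]+b",
        "matchCount": "1",
        "teardown": [
            "$TC qdisc del dev $DUMMY handle ffff: root bfifo"
        ]
    }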
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
index 3593fb8f79ad..be293e7c6d18 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq limit 3000",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 3000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq flow_limit 300",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 300p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq quantum 9000",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p buckets.*orphan_mask 1023 quantum 9000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq initial_quantum 900000",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p buckets.*initial_quantum 900000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq initial_quantum 0x80000000",
"expExitCode": "2",
@@ -133,7 +122,6 @@
"matchPattern": "qdisc fq 1: root.*initial_quantum 2048Mb",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -147,7 +135,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq maxrate 100000",
"expExitCode": "0",
@@ -155,8 +142,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p buckets.*maxrate 100Kbit",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -170,7 +156,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq nopacing",
"expExitCode": "0",
@@ -178,8 +163,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p.*nopacing",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -193,7 +177,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq refill_delay 100ms",
"expExitCode": "0",
@@ -201,8 +184,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p.*refill_delay 100ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -216,7 +198,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq low_rate_threshold 10000",
"expExitCode": "0",
@@ -224,8 +205,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p.*low_rate_threshold 10Kbit",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -239,7 +219,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq orphan_mask 255",
"expExitCode": "0",
@@ -247,8 +226,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p.*orphan_mask 255",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -262,7 +240,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq timer_slack 100",
"expExitCode": "0",
@@ -270,8 +247,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p.*timer_slack 100ns",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -285,7 +261,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq ce_threshold 100",
"expExitCode": "0",
@@ -293,8 +268,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -308,7 +282,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq horizon 100",
"expExitCode": "0",
@@ -316,8 +289,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p.*horizon 100us",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -331,7 +303,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq horizon_cap",
"expExitCode": "0",
@@ -339,8 +310,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p.*horizon_cap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -354,7 +324,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root fq"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -363,7 +332,6 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -377,7 +345,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root fq"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root fq limit 5000",
@@ -386,8 +353,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 5000p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -401,7 +367,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root fq"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root fq limit 100",
@@ -410,8 +375,7 @@
"matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 100p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_codel.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_codel.json
index a65266357a9a..9774b1e8801b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_codel.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_codel.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms interval 100ms memory_limit 32Mb ecn drop_batch 64",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel limit 1000",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 1000p flows 1024 quantum.*target 5ms interval 100ms memory_limit 32Mb ecn drop_batch 64",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel memory_limit 100000",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms interval 100ms memory_limit 100000b ecn drop_batch 64",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel target 2000",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 2ms interval 100ms memory_limit 32Mb ecn drop_batch 64",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel interval 5000",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms interval 5ms memory_limit 32Mb ecn drop_batch 64",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel quantum 9000",
"expExitCode": "0",
@@ -133,8 +122,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum 9000 target 5ms interval 100ms memory_limit 32Mb ecn drop_batch 64",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel noecn",
"expExitCode": "0",
@@ -156,8 +143,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms interval 100ms memory_limit 32Mb drop_batch 64",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -171,7 +157,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel ce_threshold 1024000",
"expExitCode": "0",
@@ -179,8 +164,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms ce_threshold 1.02s interval 100ms memory_limit 32Mb ecn drop_batch 64",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -194,7 +178,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel drop_batch 100",
"expExitCode": "0",
@@ -202,8 +185,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms interval 100ms memory_limit 32Mb ecn drop_batch 100",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -217,7 +199,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel limit 1000 flows 256 drop_batch 100",
"expExitCode": "0",
@@ -225,8 +206,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 1000p flows 256 quantum.*target 5ms interval 100ms memory_limit 32Mb ecn drop_batch 100",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -240,7 +220,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root fq_codel limit 1000 flows 256 drop_batch 100"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root fq_codel noecn",
@@ -249,8 +228,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 1000p flows 256 quantum.*target 5ms interval 100ms memory_limit 32Mb drop_batch 100",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -264,7 +242,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root fq_codel limit 1000 flows 256 drop_batch 100"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root fq_codel limit 2000",
@@ -273,8 +250,7 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 2000p flows 256 quantum.*target 5ms interval 100ms memory_limit 32Mb ecn drop_batch 100",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -288,7 +264,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root fq_codel limit 1000 flows 256 drop_batch 100"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -297,7 +272,6 @@
"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 1000p flows 256 quantum.*target 5ms interval 100ms memory_limit 32Mb noecn drop_batch 100",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -311,7 +285,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel",
"expExitCode": "0",
@@ -319,8 +292,7 @@
"matchPattern": "class fq_codel 1:",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
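These JSON files are driven by tdc; to exercise only what this patch touches, a single file or category can be selected. A usage sketch, run from tools/testing/selftests/tc-testing (-f and -c are tdc's documented file and category selectors):

    # run every case in one converted file
    ./tdc.py -f tc-tests/qdiscs/fq_codel.json
    # or run one category across all files
    ./tdc.py -c fifo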
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
index 773c5027553d..d012d88d67fe 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
@@ -6,8 +6,10 @@
"qdisc",
"fq_pie"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_pie flows 65536",
"expExitCode": "0",
@@ -15,7 +17,6 @@
"matchPattern": "qdisc fq_pie 1: root refcnt 2 limit 10240p flows 65536",
"matchCount": "1",
"teardown": [
- "$IP link del dev $DUMMY"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/gred.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/gred.json
index 013c8ee037a4..df07fe318de9 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/gred.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/gred.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root gred setup vqs 10 default 1",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc gred 1: root refcnt [0-9]+ vqs 10 default 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root gred setup vqs 10 default 1 grio",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc gred 1: root refcnt [0-9]+ vqs 10 default 1.*grio",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root gred setup vqs 10 default 1 limit 1000",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc gred 1: root refcnt [0-9]+ vqs 10 default 1 limit 1000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root gred setup vqs 10 default 2 ecn",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc gred 1: root refcnt [0-9]+ vqs 10 default 2.*ecn",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root gred setup vqs 10 default 2 harddrop",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc gred 1: root refcnt [0-9]+ vqs 10 default 2.*harddrop",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root gred setup vqs 10 default 1"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root gred limit 60KB min 15K max 25K burst 64 avpkt 1500 bandwidth 10Mbit DP 1 probability 0.1",
@@ -134,8 +123,7 @@
"matchPattern": "qdisc gred 1: root refcnt [0-9]+ vqs 10 default 1 limit.*vq 1 prio [0-9]+ limit 60Kb min 15Kb max 25Kb",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -149,7 +137,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root gred setup vqs 10 default 1",
"expExitCode": "0",
@@ -157,8 +144,7 @@
"matchPattern": "class gred 1:",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/hfsc.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/hfsc.json
index af27b2c20e17..0ddb8e1b4369 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/hfsc.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/hfsc.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root hfsc",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc hfsc 1: root refcnt [0-9]+",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root hfsc default 11"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 hfsc sc rate 20000 ul rate 10000",
@@ -42,8 +39,7 @@
"matchPattern": "class hfsc 1:1 parent 1: sc m1 0bit d 0us m2 20Kbit ul m1 0bit d 0us m2 10Kbit",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -57,7 +53,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root hfsc default 11"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 hfsc sc umax 1540 dmax 5ms rate 10000 ul rate 10000",
@@ -66,8 +61,7 @@
"matchPattern": "class hfsc 1:1 parent 1: sc m1 2464Kbit d 5ms m2 10Kbit ul m1 0bit d 0us m2 10Kbit",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -81,7 +75,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root hfsc default 11"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 hfsc rt rate 20000 ls rate 10000",
@@ -90,8 +83,7 @@
"matchPattern": "class hfsc 1:1 parent 1: rt m1 0bit d 0us m2 20Kbit ls m1 0bit d 0us m2 10Kbit",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -105,7 +97,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root hfsc default 11"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 hfsc rt umax 1540 dmax 5ms rate 10000 ls rate 10000",
@@ -114,8 +105,7 @@
"matchPattern": "class hfsc 1:1 parent 1: rt m1 2464Kbit d 5ms m2 10Kbit ls m1 0bit d 0us m2 10Kbit",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -129,7 +119,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root hfsc default 11"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -138,7 +127,6 @@
"matchPattern": "qdisc hfsc 1: root refcnt [0-9]+",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -152,7 +140,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root hfsc",
"expExitCode": "0",
@@ -160,8 +147,7 @@
"matchPattern": "class hfsc 1: root",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/hhf.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/hhf.json
index 949f6e5de902..dbef5474b26b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/hhf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/hhf.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root hhf",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc hhf 1: root refcnt [0-9]+.*hh_limit 2048 reset_timeout 40ms admit_bytes 128Kb evict_timeout 1s non_hh_weight 2",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root hhf limit 1500",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc hhf 1: root refcnt [0-9]+ limit 1500p.*hh_limit 2048 reset_timeout 40ms admit_bytes 128Kb evict_timeout 1s non_hh_weight 2",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root hhf quantum 9000",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc hhf 1: root refcnt [0-9]+.*quantum 9000b hh_limit 2048 reset_timeout 40ms admit_bytes 128Kb evict_timeout 1s non_hh_weight 2",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root hhf reset_timeout 100ms",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc hhf 1: root refcnt [0-9]+.*hh_limit 2048 reset_timeout 100ms admit_bytes 128Kb evict_timeout 1s non_hh_weight 2",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root hhf admit_bytes 100000",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc hhf 1: root refcnt [0-9]+.*hh_limit 2048 reset_timeout 40ms admit_bytes 100000b evict_timeout 1s non_hh_weight 2",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root hhf evict_timeout 0.5s",
"expExitCode": "0",
@@ -133,8 +122,7 @@
"matchPattern": "qdisc hhf 1: root refcnt [0-9]+.*hh_limit 2048 reset_timeout 40ms admit_bytes 128Kb evict_timeout 500ms non_hh_weight 2",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root hhf non_hh_weight 10",
"expExitCode": "0",
@@ -156,8 +143,7 @@
"matchPattern": "qdisc hhf 1: root refcnt [0-9]+.*hh_limit 2048 reset_timeout 40ms admit_bytes 128Kb evict_timeout 1s non_hh_weight 10",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -171,7 +157,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root hhf"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root hhf limit 1500",
@@ -180,8 +165,7 @@
"matchPattern": "qdisc hhf 1: root refcnt [0-9]+ limit 1500p.*hh_limit 2048 reset_timeout 40ms admit_bytes 128Kb evict_timeout 1s non_hh_weight 2",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -195,7 +179,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root hhf",
"expExitCode": "0",
@@ -203,8 +186,7 @@
"matchPattern": "class hhf 1:",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/htb.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/htb.json
index 9529899482e0..cab745f9a83c 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/htb.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/htb.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root htb",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc htb 1: root refcnt [0-9]+ r2q 10 default 0 direct_packets_stat.*direct_qlen",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root htb default 10",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc htb 1: root refcnt [0-9]+ r2q 10 default 0x10 direct_packets_stat.* direct_qlen",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root htb r2q 5",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc htb 1: root refcnt [0-9]+ r2q 5 default 0 direct_packets_stat.*direct_qlen",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root htb direct_qlen 1024",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc htb 1: root refcnt [0-9]+ r2q 10 default 0 direct_packets_stat.*direct_qlen 1024",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root htb"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 htb rate 20kbit burst 1000",
@@ -111,8 +102,7 @@
"matchPattern": "class htb 1:1 root prio 0 rate 20Kbit ceil 20Kbit burst 1000b cburst 1600b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -126,7 +116,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root htb"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 htb rate 20Kbit mpu 64",
@@ -135,8 +124,7 @@
"matchPattern": "class htb 1:1 root prio 0 rate 20Kbit ceil 20Kbit burst 1600b cburst 1600b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -150,7 +138,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root htb"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 htb rate 20Kbit prio 1",
@@ -159,8 +146,7 @@
"matchPattern": "class htb 1:1 root prio 1 rate 20Kbit ceil 20Kbit burst 1600b cburst 1600b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -174,7 +160,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root htb"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 htb rate 20Kbit ceil 10Kbit",
@@ -183,8 +168,7 @@
"matchPattern": "class htb 1:1 root prio 0 rate 20Kbit ceil 10Kbit burst 1600b cburst 1600b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -198,7 +182,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root htb"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 htb rate 20Kbit cburst 2000",
@@ -207,8 +190,7 @@
"matchPattern": "class htb 1:1 root prio 0 rate 20Kbit ceil 20Kbit burst 1600b cburst 2000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -222,7 +204,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root htb"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 htb rate 20Kbit mtu 2048",
@@ -231,8 +212,7 @@
"matchPattern": "class htb 1:1 root prio 0 rate 20Kbit ceil 20Kbit burst 2Kb cburst 2Kb",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -246,7 +226,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root htb"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 htb rate 20Kbit quantum 2048",
@@ -255,8 +234,7 @@
"matchPattern": "class htb 1:1 root prio 0 rate 20Kbit ceil 20Kbit burst 1600b cburst 1600b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -270,7 +248,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root htb r2q 5"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -279,7 +256,6 @@
"matchPattern": "qdisc htb 1: root refcnt [0-9]+",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/ingress.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/ingress.json
index 11d33362408c..57bddc1212d8 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/ingress.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/ingress.json
@@ -7,16 +7,17 @@
"ingress"
],
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"cmdUnderTest": "$TC qdisc add dev $DUMMY ingress",
"expExitCode": "0",
"verifyCmd": "$TC qdisc show dev $DUMMY",
"matchPattern": "qdisc ingress ffff:",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -26,8 +27,10 @@
"qdisc",
"ingress"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY ingress foorbar",
"expExitCode": "1",
@@ -35,7 +38,6 @@
"matchPattern": "qdisc ingress ffff:",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -45,8 +47,10 @@
"qdisc",
"ingress"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY ingress",
@@ -55,8 +59,7 @@
"matchPattern": "qdisc ingress ffff:",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
},
{
@@ -66,8 +69,10 @@
"qdisc",
"ingress"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY ingress",
"expExitCode": "2",
@@ -75,7 +80,6 @@
"matchPattern": "qdisc ingress ffff:",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -85,8 +89,10 @@
"qdisc",
"ingress"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY ingress",
"$TC qdisc del dev $DUMMY ingress"
],
@@ -96,7 +102,6 @@
"matchPattern": "qdisc ingress ffff:",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -106,8 +111,10 @@
"qdisc",
"ingress"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY ingress",
"expExitCode": "0",
@@ -115,8 +122,7 @@
"matchPattern": "class ingress",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY ingress",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY ingress"
]
}
]
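The deleted setup/teardown pairs amount to the device lifecycle sketched below; requiring nsPlugin shifts that work to the harness, which is expected to provision $DUMMY inside a per-test network namespace. A rough shell equivalent, assuming the plugin behaves as the deletions imply (the namespace name "tcut-ns" is illustrative):

    ip netns add tcut-ns                          # fresh per-test namespace
    ip -n tcut-ns link add dev dummy0 type dummy  # the $DUMMY device
    ip -n tcut-ns link set dev dummy0 up
    # cmdUnderTest/verifyCmd then run via: ip netns exec tcut-ns ...
    ip netns del tcut-ns                          # one-shot teardown of everything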
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json
index 7e41f548f8e8..3c4444961488 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ limit",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem limit 200",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ limit 200",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem delay 100ms",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*delay 100ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem delay 100ms 10ms distribution normal corrupt 1%",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*delay 100ms 10ms corrupt 1%",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem delay 100ms 10ms distribution normal duplicate 1%",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*delay 100ms 10ms duplicate 1%",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem delay 100ms 10ms distribution pareto loss 1%",
"expExitCode": "0",
@@ -133,8 +122,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*delay 100ms 10ms loss 1%",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem delay 100ms 10ms distribution paretonormal loss state 1",
"expExitCode": "0",
@@ -156,8 +143,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*delay 100ms 10ms loss state p13 1% p31 99% p32 0% p23 100% p14 0%",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -171,7 +157,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem loss gemodel 1%",
"expExitCode": "0",
@@ -179,8 +164,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*loss gemodel p 1%",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -194,7 +178,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem delay 100ms 10ms reorder 2% gap 100",
"expExitCode": "0",
@@ -202,8 +185,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*reorder 2%",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -217,7 +199,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem rate 20000",
"expExitCode": "0",
@@ -225,8 +206,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*rate 20Kbit",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -240,7 +220,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem slot 10 200 packets 2000 bytes 9000",
"expExitCode": "0",
@@ -248,8 +227,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*slot 10ns 200ns packets 2000 bytes 9000",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -263,7 +241,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem slot distribution pareto 1ms 0.1ms",
"expExitCode": "0",
@@ -271,8 +248,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*slot distribution 1ms 100us",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -286,7 +262,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root netem delay 100ms 10ms distribution normal loss 1%"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root netem delay 100ms 10ms distribution normal loss 2%",
@@ -295,8 +270,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*loss 2%",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -310,7 +284,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root netem delay 100ms 10ms distribution normal loss 1%"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root netem delay 200ms 10ms",
@@ -319,8 +292,7 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*delay 200ms 10ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -334,7 +306,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root netem delay 100ms 10ms distribution normal"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -343,7 +314,6 @@
"matchPattern": "qdisc netem 1: root refcnt [0-9]+ .*delay 100ms 10ms",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -357,7 +327,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root netem",
"expExitCode": "0",
@@ -365,8 +334,7 @@
"matchPattern": "class netem 1:",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/pfifo_fast.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/pfifo_fast.json
index ab53238f4c5a..30da27fe8806 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/pfifo_fast.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/pfifo_fast.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root pfifo_fast",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc pfifo_fast 1: root refcnt [0-9]+ bands 3 priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root pfifo_fast",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "Sent.*bytes.*pkt \\(dropped.*overlimits.*requeues .*\\)",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root pfifo_fast"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 2: root pfifo_fast",
@@ -65,8 +60,7 @@
"matchPattern": "qdisc pfifo_fast 2: root refcnt [0-9]+ bands 3 priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 2: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 2: root"
]
},
{
@@ -80,7 +74,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root pfifo_fast"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -89,7 +82,6 @@
"matchPattern": "qdisc pfifo_fast 1: root refcnt [0-9]+ bands 3 priomap",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -103,7 +95,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root pfifo_fast"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: root",
@@ -112,8 +103,7 @@
"matchPattern": "qdisc pfifo_fast 1: root refcnt [0-9]+ bands 3 priomap",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/plug.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/plug.json
index 6454518af178..6ec7e0a01265 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/plug.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/plug.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root plug",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc plug 1: root refcnt",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root plug block",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc plug 1: root refcnt",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root plug release",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc plug 1: root refcnt",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root plug release_indefinite",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc plug 1: root refcnt",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root plug limit 100",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc plug 1: root refcnt",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root plug"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -134,7 +123,6 @@
"matchPattern": "qdisc plug 1: root refcnt",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root plug"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root plug limit 1000",
@@ -157,8 +144,7 @@
"matchPattern": "qdisc plug 1: root refcnt",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -172,7 +158,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root plug"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root plug limit 1000",
@@ -181,8 +166,7 @@
"matchPattern": "qdisc plug 1: root refcnt",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
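Each matchPattern above is a regular expression applied to the verifyCmd output, and matchCount is the number of hits that must result. grep -E approximates the check — a minimal illustration, where the echoed line is a made-up sample of tc output:

    echo "qdisc plug 1: root refcnt 2" | grep -cE "qdisc plug 1: root refcnt"
    # prints 1, i.e. matchCount "1" would pass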
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/prio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/prio.json
index 8186de2f0dcf..69abf041c799 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/prio.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/prio.json
@@ -6,8 +6,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root prio",
"expExitCode": "0",
@@ -15,8 +17,7 @@
"matchPattern": "qdisc prio 1: root",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root prio"
]
},
{
@@ -26,8 +27,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY root handle ffff: prio",
"expExitCode": "0",
@@ -35,7 +38,6 @@
"matchPattern": "qdisc prio ffff: root",
"matchCount": "1",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -45,8 +47,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY root handle 10000: prio",
"expExitCode": "255",
@@ -54,7 +58,6 @@
"matchPattern": "qdisc prio 10000: root",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -64,8 +67,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root prio foorbar",
"expExitCode": "1",
@@ -73,7 +78,6 @@
"matchPattern": "qdisc prio 1: root",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -83,8 +87,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root prio bands 4 priomap 1 1 2 2 3 3 0 0 1 2 3 0 0 0 0 0",
"expExitCode": "0",
@@ -92,8 +98,7 @@
"matchPattern": "qdisc prio 1: root.*bands 4 priomap.*1 1 2 2 3 3 0 0 1 2 3 0 0 0 0 0",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root prio"
]
},
{
@@ -103,8 +108,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root prio bands 4 priomap 1 1 2 2 3 3 0 0 1 2 3 0 0 0 0 0 1 1",
"expExitCode": "1",
@@ -112,7 +119,6 @@
"matchPattern": "qdisc prio 1: root.*bands 4 priomap.*1 1 2 2 3 3 0 0 1 2 3 0 0 0 0 0 1 1",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -122,8 +128,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root prio bands 4 priomap 1 1 2 2 7 5 0 0 1 2 3 0 0 0 0 0",
"expExitCode": "1",
@@ -131,7 +139,6 @@
"matchPattern": "qdisc prio 1: root.*bands 4 priomap.*1 1 2 2 7 5 0 0 1 2 3 0 0 0 0 0",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -141,8 +148,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root prio bands 1 priomap 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0",
"expExitCode": "2",
@@ -150,7 +159,6 @@
"matchPattern": "qdisc prio 1: root.*bands 1 priomap.*0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -160,8 +168,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root prio bands 1024 priomap 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16",
"expExitCode": "2",
@@ -169,7 +179,6 @@
"matchPattern": "qdisc prio 1: root.*bands 1024 priomap.*1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -179,8 +188,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root prio"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root prio bands 8 priomap 1 1 2 2 3 3 4 4 5 5 6 6 7 7 0 0",
@@ -189,8 +200,7 @@
"matchPattern": "qdisc prio 1: root.*bands 8 priomap.*1 1 2 2 3 3 4 4 5 5 6 6 7 7 0 0",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root prio"
]
},
{
@@ -200,8 +210,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root prio"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root prio",
@@ -210,8 +222,7 @@
"matchPattern": "qdisc prio 1: root",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root prio"
]
},
{
@@ -221,8 +232,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY root handle 1: prio",
"expExitCode": "2",
@@ -230,7 +243,6 @@
"matchPattern": "qdisc prio 1: root",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -240,8 +252,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY root handle 123^ prio",
"expExitCode": "255",
@@ -249,7 +263,6 @@
"matchPattern": "qdisc prio 123 root",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -259,8 +272,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY root handle 1: prio",
"$TC qdisc del dev $DUMMY root handle 1: prio"
],
@@ -270,7 +285,6 @@
"matchPattern": "qdisc ingress ffff:",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -280,8 +294,10 @@
"qdisc",
"prio"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root prio",
"expExitCode": "0",
@@ -289,8 +305,7 @@
"matchPattern": "class prio 1:[0-9]+ parent 1:",
"matchCount": "3",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root prio",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root prio"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json
index 976dffda4654..c95643929841 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root qfq",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc qfq 1: root refcnt [0-9]+",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root qfq"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 qfq weight 100",
@@ -42,8 +39,7 @@
"matchPattern": "class qfq 1:1 root weight 100 maxpkt",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -57,7 +53,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root qfq"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 qfq weight 9999",
@@ -66,8 +61,7 @@
"matchPattern": "class qfq 1:1 root weight 9999 maxpkt",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -81,7 +75,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root qfq"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 qfq maxpkt 2000",
@@ -90,8 +83,7 @@
"matchPattern": "class qfq 1:1 root weight 1 maxpkt 2000",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -105,7 +97,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root qfq"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 qfq maxpkt 128",
@@ -114,8 +105,7 @@
"matchPattern": "class qfq 1:1 root weight 1 maxpkt 128",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -129,7 +119,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root qfq"
],
"cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 qfq maxpkt 99999",
@@ -138,8 +127,7 @@
"matchPattern": "class qfq 1:1 root weight 1 maxpkt 99999",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -153,7 +141,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root qfq",
"$TC class add dev $DUMMY parent 1: classid 1:1 qfq weight 100"
],
@@ -163,8 +150,7 @@
"matchPattern": "class qfq 1:[0-9]+ root weight [0-9]+00 maxpkt",
"matchCount": "2",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -178,7 +164,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root qfq",
"$TC class add dev $DUMMY parent 1: classid 1:1 qfq weight 100"
],
@@ -188,7 +173,6 @@
"matchPattern": "qdisc qfq 1: root refcnt [0-9]+",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -202,7 +186,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root qfq",
"expExitCode": "0",
@@ -210,8 +193,7 @@
"matchPattern": "class qfq 1:",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -225,7 +207,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$IP link set dev $DUMMY mtu 2147483647 || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root qfq"
],
@@ -235,7 +216,6 @@
"matchPattern": "class qfq 1:",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -249,7 +229,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$IP link set dev $DUMMY mtu 256 || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root qfq"
],
@@ -259,7 +238,6 @@
"matchPattern": "class qfq 1:",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -277,7 +255,6 @@
]
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$IP link set dev $DUMMY up || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: stab mtu 2048 tsize 512 mpu 0 overhead 999999999 linklayer ethernet root qfq",
"$TC class add dev $DUMMY parent 1: classid 1:1 qfq weight 100",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json
index 4b3e449857f2..eec73fda6c80 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red limit 1M avpkt 1500 min 100K max 300K",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb $",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red adaptive limit 1M avpkt 1500 min 100K max 300K",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb adaptive $",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red ecn limit 1M avpkt 1500 min 100K max 300K",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb ecn $",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red ecn adaptive limit 1M avpkt 1500 min 100K max 300K",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb ecn adaptive $",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red ecn harddrop limit 1M avpkt 1500 min 100K max 300K",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb ecn harddrop $",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red ecn nodrop limit 1M avpkt 1500 min 100K max 300K",
"expExitCode": "0",
@@ -133,8 +122,7 @@
"matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb ecn nodrop $",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red nodrop limit 1M avpkt 1500 min 100K max 300K",
"expExitCode": "2",
@@ -156,7 +143,6 @@
"matchPattern": "qdisc red",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
]
},
{
@@ -170,7 +156,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red ecn harddrop nodrop limit 1M avpkt 1500 min 100K max 300K",
"expExitCode": "0",
@@ -178,8 +163,7 @@
"matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb ecn harddrop nodrop $",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -193,7 +177,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red limit 1M avpkt 1500 min 100K max 300K",
"expExitCode": "0",
@@ -201,8 +184,7 @@
"matchPattern": "class red 1:[0-9]+ parent 1:",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json
index e21c7f22c6d4..aa7914c441ea 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc sfb 1: root refcnt [0-9]+ rehash 600s db 60s",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb rehash 60",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc sfb 1: root refcnt [0-9]+ rehash 60ms db 60s",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb db 100",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc sfb 1: root refcnt [0-9]+ rehash 600s db 100ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb limit 100",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc sfb 1: root refcnt [0-9]+ rehash 600s db 60s limit 100p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb max 100",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc sfb 1: root refcnt 2 rehash 600s db 60s.*max 100p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb target 100",
"expExitCode": "0",
@@ -133,8 +122,7 @@
"matchPattern": "qdisc sfb 1: root refcnt 2 rehash 600s db 60s.*target 100p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb increment 0.1",
"expExitCode": "0",
@@ -156,8 +143,7 @@
"matchPattern": "qdisc sfb 1: root refcnt 2 rehash 600s db 60s.*increment 0.1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -171,7 +157,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb decrement 0.1",
"expExitCode": "0",
@@ -179,8 +164,7 @@
"matchPattern": "qdisc sfb 1: root refcnt 2 rehash 600s db 60s.*decrement 0.1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -194,7 +178,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb penalty_rate 4000",
"expExitCode": "0",
@@ -202,8 +185,7 @@
"matchPattern": "qdisc sfb 1: root refcnt 2 rehash 600s db 60s.*penalty_rate 4000pps",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -217,7 +199,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb penalty_burst 64",
"expExitCode": "0",
@@ -225,8 +206,7 @@
"matchPattern": "qdisc sfb 1: root refcnt 2 rehash 600s db 60s.*penalty_burst 64p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -240,7 +220,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root sfb penalty_burst 64"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root sfb rehash 100",
@@ -249,8 +228,7 @@
"matchPattern": "qdisc sfb 1: root refcnt 2 rehash 100ms db 60s",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -264,7 +242,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb",
"expExitCode": "0",
@@ -272,8 +249,7 @@
"matchPattern": "class sfb 1:",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json
index b6be718a174a..16d51936b385 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc sfq 1: root refcnt [0-9]+ limit 127p quantum.*depth 127 divisor 1024",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq limit 8",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc sfq 1: root refcnt [0-9]+ limit 8p",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq perturb 10",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "depth 127 divisor 1024 perturb 10sec",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq quantum 9000",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc sfq 1: root refcnt [0-9]+ limit 127p quantum 9000b depth 127 divisor 1024",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq divisor 512",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc sfq 1: root refcnt [0-9]+ limit 127p quantum 1514b depth 127 divisor 512",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq flows 20",
"expExitCode": "0",
@@ -133,8 +122,7 @@
"matchPattern": "qdisc sfq 1: root refcnt",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq depth 64",
"expExitCode": "0",
@@ -156,8 +143,7 @@
"matchPattern": "qdisc sfq 1: root refcnt [0-9]+ limit 127p quantum 1514b depth 64 divisor 1024",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -171,7 +157,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq headdrop",
"expExitCode": "0",
@@ -179,8 +164,7 @@
"matchPattern": "qdisc sfq 1: root refcnt [0-9]+ limit 127p quantum 1514b depth 127 headdrop divisor 1024",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -194,7 +178,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq redflowlimit 100000 min 8000 max 60000 probability 0.20 ecn headdrop",
"expExitCode": "0",
@@ -202,8 +185,7 @@
"matchPattern": "qdisc sfq 1: root refcnt [0-9]+ limit 127p quantum 1514b depth 127 headdrop divisor 1024 ewma 6 min 8000b max 60000b probability 0.2 ecn",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -217,7 +199,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq",
"expExitCode": "0",
@@ -225,8 +206,7 @@
"matchPattern": "class sfq 1:",
"matchCount": "0",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/skbprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/skbprio.json
index 5766045c9d33..076d1d69a3a4 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/skbprio.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/skbprio.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root skbprio",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc skbprio 1: root refcnt [0-9]+ limit 64",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root skbprio limit 1",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc skbprio 1: root refcnt [0-9]+ limit 1",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root skbprio"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root skbprio limit 32",
@@ -65,8 +60,7 @@
"matchPattern": "qdisc skbprio 1: root refcnt [0-9]+ limit 32",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -80,7 +74,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root skbprio",
"expExitCode": "0",
@@ -88,8 +81,7 @@
"matchPattern": "class skbprio 1:",
"matchCount": "64",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/tbf.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/tbf.json
index a4b3dfe51ff5..547a44910041 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/tbf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/tbf.json
@@ -10,7 +10,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root tbf limit 1000 burst 1500 rate 10000",
"expExitCode": "0",
@@ -18,8 +17,7 @@
"matchPattern": "qdisc tbf 1: root refcnt [0-9]+ rate 10Kbit burst 1500b limit 1000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -33,7 +31,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root tbf limit 1000 burst 1500 rate 20000 mtu 2048",
"expExitCode": "0",
@@ -41,8 +38,7 @@
"matchPattern": "qdisc tbf 1: root refcnt [0-9]+ rate 20Kbit burst 1500b limit 1000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -56,7 +52,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root tbf limit 1000 burst 1500 rate 20000 mtu 1510 peakrate 30000",
"expExitCode": "0",
@@ -64,8 +59,7 @@
"matchPattern": "qdisc tbf 1: root refcnt [0-9]+ rate 20Kbit burst 1500b peakrate 30Kbit minburst.*limit 1000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -79,7 +73,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root tbf burst 1500 rate 20000 latency 100ms",
"expExitCode": "0",
@@ -87,8 +80,7 @@
"matchPattern": "qdisc tbf 1: root refcnt [0-9]+ rate 20Kbit burst 1500b lat 100ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -102,7 +94,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root tbf limit 1000 burst 1500 rate 20000 overhead 300",
"expExitCode": "0",
@@ -110,8 +101,7 @@
"matchPattern": "qdisc tbf 1: root refcnt [0-9]+ rate 20Kbit burst 1800b limit 1000b overhead 300",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -125,7 +115,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root tbf limit 1000 burst 1500 rate 20000 linklayer atm",
"expExitCode": "0",
@@ -133,8 +122,7 @@
"matchPattern": "qdisc tbf 1: root refcnt [0-9]+ rate 20Kbit burst 1696b limit 1000b linklayer atm",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -148,7 +136,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root tbf limit 1000 burst 1500 rate 20000 linklayer atm"
],
"cmdUnderTest": "$TC qdisc replace dev $DUMMY handle 1: root tbf limit 1000 burst 1500 rate 20000 linklayer ethernet",
@@ -157,8 +144,7 @@
"matchPattern": "qdisc tbf 1: root refcnt [0-9]+ rate 20Kbit burst 1500b limit 1000b",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -172,7 +158,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
"$TC qdisc add dev $DUMMY handle 1: root tbf burst 1500 rate 20000 latency 10ms"
],
"cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: root tbf burst 1500 rate 20000 latency 200ms",
@@ -181,8 +166,7 @@
"matchPattern": "qdisc tbf 1: root refcnt [0-9]+ rate 20Kbit burst 1500b lat 200ms",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
},
{
@@ -196,7 +180,6 @@
"requires": "nsPlugin"
},
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root tbf limit 1000 burst 1500 rate 10000",
"expExitCode": "0",
@@ -204,8 +187,7 @@
"matchPattern": "class tbf.*parent 1:",
"matchCount": "1",
"teardown": [
- "$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$TC qdisc del dev $DUMMY handle 1: root"
]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json
index 0082be0e93ac..e5cc31f265f8 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json
@@ -6,11 +6,8 @@
"qdisc",
"teql"
],
- "plugins": {
- "requires": "nsPlugin"
- },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
+ "$IP link add dev $DUMMY type dummy"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root teql0",
"expExitCode": "0",
@@ -19,7 +16,7 @@
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$IP link del dev $DUMMY"
]
},
{
@@ -29,13 +26,10 @@
"qdisc",
"teql"
],
- "plugins": {
- "requires": "nsPlugin"
- },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
- "echo \"1 1 4\" > /sys/bus/netdevsim/new_device",
- "$TC qdisc add dev $ETH root handle 1: teql0"
+ "$IP link add dev $DUMMY type dummy",
+ "$IP link add dev $ETH type dummy",
+ "$TC qdisc add dev $ETH handle 1: root teql0"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root teql0",
"expExitCode": "0",
@@ -44,8 +38,8 @@
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root",
- "echo \"1\" > /sys/bus/netdevsim/del_device",
- "$IP link del dev $DUMMY type dummy"
+ "$IP link del dev $DUMMY",
+ "$IP link del dev $ETH"
]
},
{
@@ -55,11 +49,8 @@
"qdisc",
"teql"
],
- "plugins": {
- "requires": "nsPlugin"
- },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true",
+ "$IP link add dev $DUMMY type dummy",
"$TC qdisc add dev $DUMMY handle 1: root teql0"
],
"cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
@@ -68,7 +59,7 @@
"matchPattern": "qdisc teql0 1: root refcnt",
"matchCount": "0",
"teardown": [
- "$IP link del dev $DUMMY type dummy"
+ "$IP link del dev $DUMMY"
]
},
{
@@ -78,11 +69,8 @@
"qdisc",
"teql"
],
- "plugins": {
- "requires": "nsPlugin"
- },
"setup": [
- "$IP link add dev $DUMMY type dummy || /bin/true"
+ "$IP link add dev $DUMMY type dummy"
],
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root teql0",
"expExitCode": "0",
@@ -91,7 +79,7 @@
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root",
- "$IP link del dev $DUMMY type dummy"
+ "$IP link del dev $DUMMY"
]
}
]
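
The teql changes go the opposite way: the nsPlugin requirement is dropped rather than added, since the teql0 master device is created by the sch_teql module at module load time and is therefore not visible inside a freshly created per-test namespace, so these cases manage their devices explicitly and will land in the serial bin of the parallel runner introduced below. The netdevsim device is also replaced by a second dummy, and the "|| /bin/true" suffixes disappear so a failing setup command now aborts the case. A fragment showing the resulting shape (fields copied from the hunks above, wrapped in a Python dict purely for illustration):

    # Illustrative fragment of a non-namespaced teql case (shape only).
    teql_case_fragment = {
        "setup": [
            "$IP link add dev $DUMMY type dummy",          # no "|| /bin/true"
            "$IP link add dev $ETH type dummy",            # replaces netdevsim
            "$TC qdisc add dev $ETH handle 1: root teql0"
        ],
        "teardown": [
            "$TC qdisc del dev $DUMMY handle 1: root",
            "$IP link del dev $DUMMY",
            "$IP link del dev $ETH"
        ]
    }
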
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index b98256f38447..a6718192aff3 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -16,6 +16,8 @@ import json
import subprocess
import time
import traceback
+import random
+from multiprocessing import Pool
from collections import OrderedDict
from string import Template
@@ -38,12 +40,11 @@ class PluginMgrTestFail(Exception):
class PluginMgr:
def __init__(self, argparser):
super().__init__()
- self.plugins = {}
+ self.plugins = set()
self.plugin_instances = []
self.failed_plugins = {}
self.argparser = argparser
- # TODO, put plugins in order
plugindir = os.getenv('TDC_PLUGIN_DIR', './plugins')
for dirpath, dirnames, filenames in os.walk(plugindir):
for fn in filenames:
@@ -53,32 +54,43 @@ class PluginMgr:
not fn.startswith('.#')):
mn = fn[0:-3]
foo = importlib.import_module('plugins.' + mn)
- self.plugins[mn] = foo
- self.plugin_instances.append(foo.SubPlugin())
+ self.plugins.add(mn)
+                        self.plugin_instances.append((mn, foo.SubPlugin()))
def load_plugin(self, pgdir, pgname):
pgname = pgname[0:-3]
+ self.plugins.add(pgname)
+
foo = importlib.import_module('{}.{}'.format(pgdir, pgname))
- self.plugins[pgname] = foo
- self.plugin_instances.append(foo.SubPlugin())
- self.plugin_instances[-1].check_args(self.args, None)
+
+ # nsPlugin must always be the first one
+ if pgname == "nsPlugin":
+ self.plugin_instances.insert(0, (pgname, foo.SubPlugin()))
+ self.plugin_instances[0][1].check_args(self.args, None)
+ else:
+ self.plugin_instances.append((pgname, foo.SubPlugin()))
+ self.plugin_instances[-1][1].check_args(self.args, None)
def get_required_plugins(self, testlist):
'''
Get all required plugins from the list of test cases and return
all unique items.
'''
- reqs = []
+ reqs = set()
for t in testlist:
try:
if 'requires' in t['plugins']:
if isinstance(t['plugins']['requires'], list):
- reqs.extend(t['plugins']['requires'])
+ reqs.update(set(t['plugins']['requires']))
else:
- reqs.append(t['plugins']['requires'])
+ reqs.add(t['plugins']['requires'])
+ t['plugins'] = t['plugins']['requires']
+ else:
+ t['plugins'] = []
except KeyError:
+ t['plugins'] = []
continue
- reqs = get_unique_item(reqs)
+
return reqs
def load_required_plugins(self, reqs, parser, args, remaining):
@@ -115,15 +127,17 @@ class PluginMgr:
return args
def call_pre_suite(self, testcount, testidlist):
- for pgn_inst in self.plugin_instances:
+ for (_, pgn_inst) in self.plugin_instances:
pgn_inst.pre_suite(testcount, testidlist)
def call_post_suite(self, index):
- for pgn_inst in reversed(self.plugin_instances):
+ for (_, pgn_inst) in reversed(self.plugin_instances):
pgn_inst.post_suite(index)
def call_pre_case(self, caseinfo, *, test_skip=False):
- for pgn_inst in self.plugin_instances:
+ for (pgn, pgn_inst) in self.plugin_instances:
+ if pgn not in caseinfo['plugins']:
+ continue
try:
pgn_inst.pre_case(caseinfo, test_skip)
except Exception as ee:
@@ -133,29 +147,37 @@ class PluginMgr:
print('testid is {}'.format(caseinfo['id']))
raise
- def call_post_case(self):
- for pgn_inst in reversed(self.plugin_instances):
+ def call_post_case(self, caseinfo):
+ for (pgn, pgn_inst) in reversed(self.plugin_instances):
+ if pgn not in caseinfo['plugins']:
+ continue
pgn_inst.post_case()
- def call_pre_execute(self):
- for pgn_inst in self.plugin_instances:
+ def call_pre_execute(self, caseinfo):
+ for (pgn, pgn_inst) in self.plugin_instances:
+ if pgn not in caseinfo['plugins']:
+ continue
pgn_inst.pre_execute()
- def call_post_execute(self):
- for pgn_inst in reversed(self.plugin_instances):
+ def call_post_execute(self, caseinfo):
+ for (pgn, pgn_inst) in reversed(self.plugin_instances):
+ if pgn not in caseinfo['plugins']:
+ continue
pgn_inst.post_execute()
def call_add_args(self, parser):
- for pgn_inst in self.plugin_instances:
+ for (pgn, pgn_inst) in self.plugin_instances:
parser = pgn_inst.add_args(parser)
return parser
def call_check_args(self, args, remaining):
- for pgn_inst in self.plugin_instances:
+ for (pgn, pgn_inst) in self.plugin_instances:
pgn_inst.check_args(args, remaining)
- def call_adjust_command(self, stage, command):
- for pgn_inst in self.plugin_instances:
+ def call_adjust_command(self, caseinfo, stage, command):
+ for (pgn, pgn_inst) in self.plugin_instances:
+ if pgn not in caseinfo['plugins']:
+ continue
command = pgn_inst.adjust_command(stage, command)
return command
@@ -177,7 +199,7 @@ def replace_keywords(cmd):
return subcmd
-def exec_cmd(args, pm, stage, command):
+def exec_cmd(caseinfo, args, pm, stage, command):
"""
Perform any required modifications on an executable command, then run
it in a subprocess and return the results.
@@ -187,9 +209,10 @@ def exec_cmd(args, pm, stage, command):
if '$' in command:
command = replace_keywords(command)
- command = pm.call_adjust_command(stage, command)
+ command = pm.call_adjust_command(caseinfo, stage, command)
if args.verbose > 0:
print('command "{}"'.format(command))
+
proc = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
@@ -211,7 +234,7 @@ def exec_cmd(args, pm, stage, command):
return proc, foutput
-def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
+def prepare_env(caseinfo, args, pm, stage, prefix, cmdlist, output = None):
"""
Execute the setup/teardown commands for a test case.
Optionally terminate test execution if the command fails.
@@ -229,7 +252,7 @@ def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
if not cmd:
continue
- (proc, foutput) = exec_cmd(args, pm, stage, cmd)
+ (proc, foutput) = exec_cmd(caseinfo, args, pm, stage, cmd)
if proc and (proc.returncode not in exit_codes):
print('', file=sys.stderr)
@@ -352,6 +375,10 @@ def find_in_json_other(res, outputJSONVal, matchJSONVal, matchJSONKey=None):
def run_one_test(pm, args, index, tidx):
global NAMES
+ ns = NAMES['NS']
+ dev0 = NAMES['DEV0']
+ dev1 = NAMES['DEV1']
+ dummy = NAMES['DUMMY']
result = True
tresult = ""
tap = ""
@@ -366,38 +393,42 @@ def run_one_test(pm, args, index, tidx):
res.set_result(ResultState.skip)
res.set_errormsg('Test case designated as skipped.')
pm.call_pre_case(tidx, test_skip=True)
- pm.call_post_execute()
+ pm.call_post_execute(tidx)
return res
if 'dependsOn' in tidx:
if (args.verbose > 0):
print('probe command for test skip')
- (p, procout) = exec_cmd(args, pm, 'execute', tidx['dependsOn'])
+ (p, procout) = exec_cmd(tidx, args, pm, 'execute', tidx['dependsOn'])
if p:
if (p.returncode != 0):
res = TestResult(tidx['id'], tidx['name'])
res.set_result(ResultState.skip)
res.set_errormsg('probe command: test skipped.')
pm.call_pre_case(tidx, test_skip=True)
- pm.call_post_execute()
+ pm.call_post_execute(tidx)
return res
# populate NAMES with TESTID for this test
NAMES['TESTID'] = tidx['id']
+ NAMES['NS'] = '{}-{}'.format(NAMES['NS'], tidx['random'])
+ NAMES['DEV0'] = '{}id{}'.format(NAMES['DEV0'], tidx['id'])
+ NAMES['DEV1'] = '{}id{}'.format(NAMES['DEV1'], tidx['id'])
+ NAMES['DUMMY'] = '{}id{}'.format(NAMES['DUMMY'], tidx['id'])
pm.call_pre_case(tidx)
- prepare_env(args, pm, 'setup', "-----> prepare stage", tidx["setup"])
+ prepare_env(tidx, args, pm, 'setup', "-----> prepare stage", tidx["setup"])
if (args.verbose > 0):
print('-----> execute stage')
- pm.call_pre_execute()
- (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
+ pm.call_pre_execute(tidx)
+ (p, procout) = exec_cmd(tidx, args, pm, 'execute', tidx["cmdUnderTest"])
if p:
exit_code = p.returncode
else:
exit_code = None
- pm.call_post_execute()
+ pm.call_post_execute(tidx)
if (exit_code is None or exit_code != int(tidx["expExitCode"])):
print("exit: {!r}".format(exit_code))
@@ -409,7 +440,7 @@ def run_one_test(pm, args, index, tidx):
else:
if args.verbose > 0:
print('-----> verify stage')
- (p, procout) = exec_cmd(args, pm, 'verify', tidx["verifyCmd"])
+ (p, procout) = exec_cmd(tidx, args, pm, 'verify', tidx["verifyCmd"])
if procout:
if 'matchJSON' in tidx:
verify_by_json(procout, res, tidx, args, pm)
@@ -431,15 +462,49 @@ def run_one_test(pm, args, index, tidx):
else:
res.set_result(ResultState.success)
- prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout)
- pm.call_post_case()
+ prepare_env(tidx, args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout)
+ pm.call_post_case(tidx)
index += 1
# remove TESTID from NAMES
del(NAMES['TESTID'])
+
+ # Restore names
+ NAMES['NS'] = ns
+ NAMES['DEV0'] = dev0
+ NAMES['DEV1'] = dev1
+ NAMES['DUMMY'] = dummy
+
return res
+def prepare_run(pm, args, testlist):
+ tcount = len(testlist)
+ emergency_exit = False
+ emergency_exit_message = ''
+
+ try:
+ pm.call_pre_suite(tcount, testlist)
+ except Exception as ee:
+ ex_type, ex, ex_tb = sys.exc_info()
+ print('Exception {} {} (caught in pre_suite).'.
+ format(ex_type, ex))
+ traceback.print_tb(ex_tb)
+ emergency_exit_message = 'EMERGENCY EXIT, call_pre_suite failed with exception {} {}\n'.format(ex_type, ex)
+ emergency_exit = True
+
+ if emergency_exit:
+ pm.call_post_suite(1)
+ return emergency_exit_message
+
+ if args.verbose:
+ print('give test rig 2 seconds to stabilize')
+
+ time.sleep(2)
+
+def purge_run(pm, index):
+ pm.call_post_suite(index)
+
def test_runner(pm, args, filtered_tests):
"""
Driver function for the unit tests.
@@ -455,28 +520,9 @@ def test_runner(pm, args, filtered_tests):
tap = ''
badtest = None
stage = None
- emergency_exit = False
- emergency_exit_message = ''
tsr = TestSuiteReport()
- try:
- pm.call_pre_suite(tcount, [tidx['id'] for tidx in testlist])
- except Exception as ee:
- ex_type, ex, ex_tb = sys.exc_info()
- print('Exception {} {} (caught in pre_suite).'.
- format(ex_type, ex))
- traceback.print_tb(ex_tb)
- emergency_exit_message = 'EMERGENCY EXIT, call_pre_suite failed with exception {} {}\n'.format(ex_type, ex)
- emergency_exit = True
- stage = 'pre-SUITE'
-
- if emergency_exit:
- pm.call_post_suite(index)
- return emergency_exit_message
- if args.verbose > 1:
- print('give test rig 2 seconds to stabilize')
- time.sleep(2)
for tidx in testlist:
if "flower" in tidx["category"] and args.device == None:
errmsg = "Tests using the DEV2 variable must define the name of a "
@@ -539,7 +585,68 @@ def test_runner(pm, args, filtered_tests):
if input(sys.stdin):
print('got something on stdin')
- pm.call_post_suite(index)
+ return (index, tsr)
+
+def mp_bins(alltests):
+ serial = []
+ parallel = []
+
+ for test in alltests:
+ if 'nsPlugin' not in test['plugins']:
+ serial.append(test)
+ else:
+ # We can only create one netdevsim device at a time
+ if 'netdevsim/new_device' in str(test['setup']):
+ serial.append(test)
+ else:
+ parallel.append(test)
+
+ return (serial, parallel)
+
+def __mp_runner(tests):
+ (_, tsr) = test_runner(mp_pm, mp_args, tests)
+ return tsr._testsuite
+
+def test_runner_mp(pm, args, alltests):
+ prepare_run(pm, args, alltests)
+
+ (serial, parallel) = mp_bins(alltests)
+
+ batches = [parallel[n : n + 32] for n in range(0, len(parallel), 32)]
+ batches.insert(0, serial)
+
+ print("Executing {} tests in parallel and {} in serial".format(len(parallel), len(serial)))
+ print("Using {} batches".format(len(batches)))
+
+    # We can't pickle these objects so work around them
+ global mp_pm
+ mp_pm = pm
+
+ global mp_args
+ mp_args = args
+
+ with Pool(args.mp) as p:
+ pres = p.map(__mp_runner, batches)
+
+ tsr = TestSuiteReport()
+ for trs in pres:
+ for res in trs:
+ tsr.add_resultdata(res)
+
+ # Passing an index is not useful in MP
+ purge_run(pm, None)
+
+ return tsr
+
+def test_runner_serial(pm, args, alltests):
+ prepare_run(pm, args, alltests)
+
+ if args.verbose:
+ print("Executing {} tests in serial".format(len(alltests)))
+
+ (index, tsr) = test_runner(pm, args, alltests)
+
+ purge_run(pm, index)
return tsr
@@ -568,12 +675,15 @@ def load_from_file(filename):
k['filename'] = filename
return testlist
+def identity(string):
+ return string
def args_parse():
"""
Create the argument parser.
"""
parser = argparse.ArgumentParser(description='Linux TC unit tests')
+ parser.register('type', None, identity)
return parser
@@ -631,6 +741,9 @@ def set_args(parser):
parser.add_argument(
'-P', '--pause', action='store_true',
help='Pause execution just before post-suite stage')
+ parser.add_argument(
+ '-J', '--multiprocess', type=int, default=1, dest='mp',
+ help='Run tests in parallel whenever possible')
return parser
@@ -661,7 +774,6 @@ def get_id_list(alltests):
"""
return [x["id"] for x in alltests]
-
def check_case_id(alltests):
"""
Check for duplicate test case IDs.
@@ -683,7 +795,6 @@ def generate_case_ids(alltests):
If a test case has a blank ID field, generate a random hex ID for it
and then write the test cases back to disk.
"""
- import random
for c in alltests:
if (c["id"] == ""):
while True:
@@ -742,6 +853,9 @@ def filter_tests_by_category(args, testlist):
return answer
+def set_random(alltests):
+ for tidx in alltests:
+ tidx['random'] = random.getrandbits(32)
def get_test_cases(args):
"""
@@ -840,6 +954,8 @@ def set_operation_mode(pm, parser, args, remaining):
list_test_cases(alltests)
exit(0)
+ set_random(alltests)
+
exit_code = 0 # KSFT_PASS
if len(alltests):
req_plugins = pm.get_required_plugins(alltests)
@@ -848,7 +964,12 @@ def set_operation_mode(pm, parser, args, remaining):
except PluginDependencyException as pde:
print('The following plugins were not found:')
print('{}'.format(pde.missing_pg))
- catresults = test_runner(pm, args, alltests)
+
+ if args.mp > 1:
+ catresults = test_runner_mp(pm, args, alltests)
+ else:
+ catresults = test_runner_serial(pm, args, alltests)
+
if catresults.count_failures() != 0:
exit_code = 1 # KSFT_FAIL
if args.format == 'none':
@@ -883,6 +1004,13 @@ def main():
Start of execution; set up argument parser and get the arguments,
and start operations.
"""
+ import resource
+
+ if sys.version_info.major < 3 or sys.version_info.minor < 8:
+ sys.exit("tdc requires at least python 3.8")
+
+ resource.setrlimit(resource.RLIMIT_NOFILE, (1048576, 1048576))
+
parser = args_parse()
parser = set_args(parser)
pm = PluginMgr(parser)
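
Taken together, the tdc.py changes turn the runner into a two-phase scheduler: mp_bins() sends every case that lacks nsPlugin, or that creates a netdevsim device, to a serial bin; test_runner_mp() slices the remaining cases into batches of 32 and maps them over a multiprocessing Pool of args.mp workers (the new -J/--multiprocess option), while run_one_test() suffixes NS with a random 32-bit value and DEV0/DEV1/DUMMY with the test id so concurrent cases cannot collide on names. Below is a self-contained sketch of that batching scheme, with a stand-in runner in place of tdc's real test_runner():

    # Standalone sketch of tdc's serial/parallel binning and Pool dispatch.
    import random
    from multiprocessing import Pool

    def mp_bins(alltests):
        serial = []
        parallel = []
        for test in alltests:
            # Non-namespaced tests, and tests that create a netdevsim
            # device (only one can exist at a time), go to the serial bin.
            if 'nsPlugin' not in test['plugins'] or \
               'netdevsim/new_device' in str(test['setup']):
                serial.append(test)
            else:
                parallel.append(test)
        return serial, parallel

    def run_batch(tests):
        # Stand-in for tdc's test_runner(): shows only the per-test name
        # uniquification that lets batches run concurrently without clashes.
        results = []
        for t in tests:
            ns = 'tdc-ns-{}'.format(random.getrandbits(32))   # cf. NAMES['NS']
            dummy = 'dummyid{}'.format(t['id'])               # cf. NAMES['DUMMY']
            results.append((t['id'], ns, dummy))
        return results

    if __name__ == '__main__':
        tests = [{'id': i, 'plugins': ['nsPlugin'], 'setup': []}
                 for i in range(100)]
        serial, parallel = mp_bins(tests)
        batches = [parallel[n:n + 32] for n in range(0, len(parallel), 32)]
        batches.insert(0, serial)       # the serial bin travels as batch 0
        with Pool(4) as p:              # 4 workers, as with "./tdc.py -J 4"
            results = p.map(run_batch, batches)
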
diff --git a/tools/testing/vsock/util.c b/tools/testing/vsock/util.c
index 01b636d3039a..6779d5008b27 100644
--- a/tools/testing/vsock/util.c
+++ b/tools/testing/vsock/util.c
@@ -211,102 +211,138 @@ int vsock_seqpacket_accept(unsigned int cid, unsigned int port,
return vsock_accept(cid, port, clientaddrp, SOCK_SEQPACKET);
}
-/* Transmit one byte and check the return value.
+/* Transmit bytes from a buffer and check the return value.
*
* expected_ret:
* <0 Negative errno (for testing errors)
* 0 End-of-file
- * 1 Success
+ * >0 Success (bytes successfully written)
*/
-void send_byte(int fd, int expected_ret, int flags)
+void send_buf(int fd, const void *buf, size_t len, int flags,
+ ssize_t expected_ret)
{
- const uint8_t byte = 'A';
- ssize_t nwritten;
+ ssize_t nwritten = 0;
+ ssize_t ret;
timeout_begin(TIMEOUT);
do {
- nwritten = send(fd, &byte, sizeof(byte), flags);
- timeout_check("write");
- } while (nwritten < 0 && errno == EINTR);
+ ret = send(fd, buf + nwritten, len - nwritten, flags);
+ timeout_check("send");
+
+ if (ret == 0 || (ret < 0 && errno != EINTR))
+ break;
+
+ nwritten += ret;
+ } while (nwritten < len);
timeout_end();
if (expected_ret < 0) {
- if (nwritten != -1) {
- fprintf(stderr, "bogus send(2) return value %zd\n",
- nwritten);
+ if (ret != -1) {
+ fprintf(stderr, "bogus send(2) return value %zd (expected %zd)\n",
+ ret, expected_ret);
exit(EXIT_FAILURE);
}
if (errno != -expected_ret) {
- perror("write");
+ perror("send");
exit(EXIT_FAILURE);
}
return;
}
- if (nwritten < 0) {
- perror("write");
+ if (ret < 0) {
+ perror("send");
exit(EXIT_FAILURE);
}
- if (nwritten == 0) {
- if (expected_ret == 0)
- return;
- fprintf(stderr, "unexpected EOF while sending byte\n");
- exit(EXIT_FAILURE);
- }
- if (nwritten != sizeof(byte)) {
- fprintf(stderr, "bogus send(2) return value %zd\n", nwritten);
+ if (nwritten != expected_ret) {
+ if (ret == 0)
+ fprintf(stderr, "unexpected EOF while sending bytes\n");
+
+ fprintf(stderr, "bogus send(2) bytes written %zd (expected %zd)\n",
+ nwritten, expected_ret);
exit(EXIT_FAILURE);
}
}
-/* Receive one byte and check the return value.
+/* Receive bytes in a buffer and check the return value.
*
* expected_ret:
* <0 Negative errno (for testing errors)
* 0 End-of-file
- * 1 Success
+ * >0 Success (bytes successfully read)
*/
-void recv_byte(int fd, int expected_ret, int flags)
+void recv_buf(int fd, void *buf, size_t len, int flags, ssize_t expected_ret)
{
- uint8_t byte;
- ssize_t nread;
+ ssize_t nread = 0;
+ ssize_t ret;
timeout_begin(TIMEOUT);
do {
- nread = recv(fd, &byte, sizeof(byte), flags);
- timeout_check("read");
- } while (nread < 0 && errno == EINTR);
+ ret = recv(fd, buf + nread, len - nread, flags);
+ timeout_check("recv");
+
+ if (ret == 0 || (ret < 0 && errno != EINTR))
+ break;
+
+ nread += ret;
+ } while (nread < len);
timeout_end();
if (expected_ret < 0) {
- if (nread != -1) {
- fprintf(stderr, "bogus recv(2) return value %zd\n",
- nread);
+ if (ret != -1) {
+ fprintf(stderr, "bogus recv(2) return value %zd (expected %zd)\n",
+ ret, expected_ret);
exit(EXIT_FAILURE);
}
if (errno != -expected_ret) {
- perror("read");
+ perror("recv");
exit(EXIT_FAILURE);
}
return;
}
- if (nread < 0) {
- perror("read");
+ if (ret < 0) {
+ perror("recv");
exit(EXIT_FAILURE);
}
- if (nread == 0) {
- if (expected_ret == 0)
- return;
- fprintf(stderr, "unexpected EOF while receiving byte\n");
- exit(EXIT_FAILURE);
- }
- if (nread != sizeof(byte)) {
- fprintf(stderr, "bogus recv(2) return value %zd\n", nread);
+ if (nread != expected_ret) {
+ if (ret == 0)
+ fprintf(stderr, "unexpected EOF while receiving bytes\n");
+
+ fprintf(stderr, "bogus recv(2) bytes read %zd (expected %zd)\n",
+ nread, expected_ret);
exit(EXIT_FAILURE);
}
+}
+
+/* Transmit one byte and check the return value.
+ *
+ * expected_ret:
+ * <0 Negative errno (for testing errors)
+ * 0 End-of-file
+ * 1 Success
+ */
+void send_byte(int fd, int expected_ret, int flags)
+{
+ const uint8_t byte = 'A';
+
+ send_buf(fd, &byte, sizeof(byte), flags, expected_ret);
+}
+
+/* Receive one byte and check the return value.
+ *
+ * expected_ret:
+ * <0 Negative errno (for testing errors)
+ * 0 End-of-file
+ * 1 Success
+ */
+void recv_byte(int fd, int expected_ret, int flags)
+{
+ uint8_t byte;
+
+ recv_buf(fd, &byte, sizeof(byte), flags, expected_ret);
+
if (byte != 'A') {
fprintf(stderr, "unexpected byte read %c\n", byte);
exit(EXIT_FAILURE);
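
With this refactor, send_byte() and recv_byte() become thin wrappers around the new general-purpose send_buf()/recv_buf() helpers, which loop until the whole buffer has been transferred (retrying on EINTR) and then check the outcome against expected_ret: a negative value asserts that the call fails with that errno, 0 asserts end-of-file, and a positive value asserts the number of bytes moved. A rough Python analogue of the send-side loop, using plain sockets rather than vsock, illustrates the control flow (the names here are illustrative, not part of the patch):

    import socket

    def send_buf(sock, buf, flags=0):
        """Loop until all of buf is sent, mirroring util.c's send_buf().

        Returns the number of bytes written; the C helper additionally
        compares this against an expected_ret argument and exits on
        mismatch, which is omitted here.
        """
        nwritten = 0
        while nwritten < len(buf):
            try:
                ret = sock.send(buf[nwritten:], flags)
            except InterruptedError:
                continue   # EINTR: retry, as the C loop does (rarely hit on
                           # Python >= 3.5, where PEP 475 retries internally)
            if ret == 0:
                break      # mirrors the C code's EOF check mid-transfer
            nwritten += ret
        return nwritten

The vsock_test.c hunks below then collapse each open-coded send()/recv() plus error-check sequence into a single helper call, for example recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK | MSG_DONTWAIT, -EAGAIN) to assert that peeking an empty socket fails with EAGAIN.
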
diff --git a/tools/testing/vsock/util.h b/tools/testing/vsock/util.h
index fb99208a95ea..e5407677ce05 100644
--- a/tools/testing/vsock/util.h
+++ b/tools/testing/vsock/util.h
@@ -42,6 +42,9 @@ int vsock_stream_accept(unsigned int cid, unsigned int port,
int vsock_seqpacket_accept(unsigned int cid, unsigned int port,
struct sockaddr_vm *clientaddrp);
void vsock_wait_remote_close(int fd);
+void send_buf(int fd, const void *buf, size_t len, int flags,
+ ssize_t expected_ret);
+void recv_buf(int fd, void *buf, size_t len, int flags, ssize_t expected_ret);
void send_byte(int fd, int expected_ret, int flags);
void recv_byte(int fd, int expected_ret, int flags);
void run_tests(const struct test_case *test_cases,
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index 90718c2fd4ea..da4cb819a183 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -19,6 +19,7 @@
#include <time.h>
#include <sys/mman.h>
#include <poll.h>
+#include <signal.h>
#include "timeout.h"
#include "control.h"
@@ -261,7 +262,6 @@ static void test_msg_peek_client(const struct test_opts *opts,
bool seqpacket)
{
unsigned char buf[MSG_PEEK_BUF_LEN];
- ssize_t send_size;
int fd;
int i;
@@ -280,17 +280,7 @@ static void test_msg_peek_client(const struct test_opts *opts,
control_expectln("SRVREADY");
- send_size = send(fd, buf, sizeof(buf), 0);
-
- if (send_size < 0) {
- perror("send");
- exit(EXIT_FAILURE);
- }
-
- if (send_size != sizeof(buf)) {
- fprintf(stderr, "Invalid send size %zi\n", send_size);
- exit(EXIT_FAILURE);
- }
+ send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
close(fd);
}
@@ -301,7 +291,6 @@ static void test_msg_peek_server(const struct test_opts *opts,
unsigned char buf_half[MSG_PEEK_BUF_LEN / 2];
unsigned char buf_normal[MSG_PEEK_BUF_LEN];
unsigned char buf_peek[MSG_PEEK_BUF_LEN];
- ssize_t res;
int fd;
if (seqpacket)
@@ -315,34 +304,16 @@ static void test_msg_peek_server(const struct test_opts *opts,
}
/* Peek from empty socket. */
- res = recv(fd, buf_peek, sizeof(buf_peek), MSG_PEEK | MSG_DONTWAIT);
- if (res != -1) {
- fprintf(stderr, "expected recv(2) failure, got %zi\n", res);
- exit(EXIT_FAILURE);
- }
-
- if (errno != EAGAIN) {
- perror("EAGAIN expected");
- exit(EXIT_FAILURE);
- }
+ recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK | MSG_DONTWAIT,
+ -EAGAIN);
control_writeln("SRVREADY");
/* Peek part of data. */
- res = recv(fd, buf_half, sizeof(buf_half), MSG_PEEK);
- if (res != sizeof(buf_half)) {
- fprintf(stderr, "recv(2) + MSG_PEEK, expected %zu, got %zi\n",
- sizeof(buf_half), res);
- exit(EXIT_FAILURE);
- }
+ recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK, sizeof(buf_half));
/* Peek whole data. */
- res = recv(fd, buf_peek, sizeof(buf_peek), MSG_PEEK);
- if (res != sizeof(buf_peek)) {
- fprintf(stderr, "recv(2) + MSG_PEEK, expected %zu, got %zi\n",
- sizeof(buf_peek), res);
- exit(EXIT_FAILURE);
- }
+ recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK, sizeof(buf_peek));
/* Compare partial and full peek. */
if (memcmp(buf_half, buf_peek, sizeof(buf_half))) {
@@ -355,22 +326,11 @@ static void test_msg_peek_server(const struct test_opts *opts,
* so check it with MSG_PEEK. We must get the length
* of the message.
*/
- res = recv(fd, buf_half, sizeof(buf_half), MSG_PEEK |
- MSG_TRUNC);
- if (res != sizeof(buf_peek)) {
- fprintf(stderr,
- "recv(2) + MSG_PEEK | MSG_TRUNC, exp %zu, got %zi\n",
- sizeof(buf_half), res);
- exit(EXIT_FAILURE);
- }
+ recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK | MSG_TRUNC,
+ sizeof(buf_peek));
}
- res = recv(fd, buf_normal, sizeof(buf_normal), 0);
- if (res != sizeof(buf_normal)) {
- fprintf(stderr, "recv(2), expected %zu, got %zi\n",
- sizeof(buf_normal), res);
- exit(EXIT_FAILURE);
- }
+ recv_buf(fd, buf_normal, sizeof(buf_normal), 0, sizeof(buf_normal));
/* Compare full peek and normal read. */
if (memcmp(buf_peek, buf_normal, sizeof(buf_peek))) {
@@ -415,7 +375,6 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
msg_count = SOCK_BUF_SIZE / MAX_MSG_SIZE;
for (int i = 0; i < msg_count; i++) {
- ssize_t send_size;
size_t buf_size;
int flags;
void *buf;
@@ -443,17 +402,7 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
flags = 0;
}
- send_size = send(fd, buf, buf_size, flags);
-
- if (send_size < 0) {
- perror("send");
- exit(EXIT_FAILURE);
- }
-
- if (send_size != buf_size) {
- fprintf(stderr, "Invalid send size\n");
- exit(EXIT_FAILURE);
- }
+ send_buf(fd, buf, buf_size, flags, buf_size);
/*
* Hash sum is computed at both client and server in
@@ -554,10 +503,7 @@ static void test_seqpacket_msg_trunc_client(const struct test_opts *opts)
exit(EXIT_FAILURE);
}
- if (send(fd, buf, sizeof(buf), 0) != sizeof(buf)) {
- perror("send failed");
- exit(EXIT_FAILURE);
- }
+ send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
control_writeln("SENDDONE");
close(fd);
@@ -679,7 +625,6 @@ static void test_seqpacket_timeout_server(const struct test_opts *opts)
static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
{
unsigned long sock_buf_size;
- ssize_t send_size;
socklen_t len;
void *data;
int fd;
@@ -706,18 +651,7 @@ static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
exit(EXIT_FAILURE);
}
- send_size = send(fd, data, sock_buf_size, 0);
- if (send_size != -1) {
- fprintf(stderr, "expected 'send(2)' failure, got %zi\n",
- send_size);
- exit(EXIT_FAILURE);
- }
-
- if (errno != EMSGSIZE) {
- fprintf(stderr, "expected EMSGSIZE in 'errno', got %i\n",
- errno);
- exit(EXIT_FAILURE);
- }
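+ /* A SOCK_SEQPACKET message bigger than the socket buffer must be
+ * rejected whole, hence the expected -EMSGSIZE.
+ */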
+ send_buf(fd, data, sock_buf_size, 0, -EMSGSIZE);
control_writeln("CLISENT");
@@ -771,15 +705,9 @@ static void test_seqpacket_invalid_rec_buffer_client(const struct test_opts *opt
memset(buf1, BUF_PATTERN_1, buf_size);
memset(buf2, BUF_PATTERN_2, buf_size);
- if (send(fd, buf1, buf_size, 0) != buf_size) {
- perror("send failed");
- exit(EXIT_FAILURE);
- }
+ send_buf(fd, buf1, buf_size, 0, buf_size);
- if (send(fd, buf2, buf_size, 0) != buf_size) {
- perror("send failed");
- exit(EXIT_FAILURE);
- }
+ send_buf(fd, buf2, buf_size, 0, buf_size);
close(fd);
}
@@ -900,7 +828,6 @@ static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
unsigned long lowat_val = RCVLOWAT_BUF_SIZE;
char buf[RCVLOWAT_BUF_SIZE];
struct pollfd fds;
- ssize_t read_res;
short poll_flags;
int fd;
@@ -955,12 +882,7 @@ static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
/* Use MSG_DONTWAIT; if the call would block, EAGAIN
* will be returned.
*/
- read_res = recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
- if (read_res != RCVLOWAT_BUF_SIZE) {
- fprintf(stderr, "Unexpected recv result %zi\n",
- read_res);
- exit(EXIT_FAILURE);
- }
+ recv_buf(fd, buf, sizeof(buf), MSG_DONTWAIT, RCVLOWAT_BUF_SIZE);
control_writeln("POLLDONE");
@@ -972,7 +894,7 @@ static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
static void test_inv_buf_client(const struct test_opts *opts, bool stream)
{
unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
- ssize_t ret;
+ ssize_t expected_ret;
int fd;
if (stream)
@@ -988,39 +910,18 @@ static void test_inv_buf_client(const struct test_opts *opts, bool stream)
control_expectln("SENDDONE");
/* Use invalid buffer here. */
- ret = recv(fd, NULL, sizeof(data), 0);
- if (ret != -1) {
- fprintf(stderr, "expected recv(2) failure, got %zi\n", ret);
- exit(EXIT_FAILURE);
- }
-
- if (errno != EFAULT) {
- fprintf(stderr, "unexpected recv(2) errno %d\n", errno);
- exit(EXIT_FAILURE);
- }
-
- ret = recv(fd, data, sizeof(data), MSG_DONTWAIT);
+ recv_buf(fd, NULL, sizeof(data), 0, -EFAULT);
if (stream) {
/* For SOCK_STREAM we must continue reading. */
- if (ret != sizeof(data)) {
- fprintf(stderr, "expected recv(2) success, got %zi\n", ret);
- exit(EXIT_FAILURE);
- }
- /* Don't check errno in case of success. */
+ expected_ret = sizeof(data);
} else {
/* For SOCK_SEQPACKET, the socket's queue must be empty. */
- if (ret != -1) {
- fprintf(stderr, "expected recv(2) failure, got %zi\n", ret);
- exit(EXIT_FAILURE);
- }
-
- if (errno != EAGAIN) {
- fprintf(stderr, "unexpected recv(2) errno %d\n", errno);
- exit(EXIT_FAILURE);
- }
+ expected_ret = -EAGAIN;
}
+ recv_buf(fd, data, sizeof(data), MSG_DONTWAIT, expected_ret);
+
control_writeln("DONE");
close(fd);
@@ -1029,7 +930,6 @@ static void test_inv_buf_client(const struct test_opts *opts, bool stream)
static void test_inv_buf_server(const struct test_opts *opts, bool stream)
{
unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
- ssize_t res;
int fd;
if (stream)
@@ -1042,11 +942,7 @@ static void test_inv_buf_server(const struct test_opts *opts, bool stream)
exit(EXIT_FAILURE);
}
- res = send(fd, data, sizeof(data), 0);
- if (res != sizeof(data)) {
- fprintf(stderr, "unexpected send(2) result %zi\n", res);
- exit(EXIT_FAILURE);
- }
+ send_buf(fd, data, sizeof(data), 0, sizeof(data));
control_writeln("SENDDONE");
@@ -1080,7 +976,6 @@ static void test_seqpacket_inv_buf_server(const struct test_opts *opts)
static void test_stream_virtio_skb_merge_client(const struct test_opts *opts)
{
- ssize_t res;
int fd;
fd = vsock_stream_connect(opts->peer_cid, 1234);
@@ -1090,22 +985,14 @@ static void test_stream_virtio_skb_merge_client(const struct test_opts *opts)
}
/* Send first skbuff. */
- res = send(fd, HELLO_STR, strlen(HELLO_STR), 0);
- if (res != strlen(HELLO_STR)) {
- fprintf(stderr, "unexpected send(2) result %zi\n", res);
- exit(EXIT_FAILURE);
- }
+ send_buf(fd, HELLO_STR, strlen(HELLO_STR), 0, strlen(HELLO_STR));
control_writeln("SEND0");
/* Peer reads part of first skbuff. */
control_expectln("REPLY0");
/* Send the second skbuff; it will be appended to the first. */
- res = send(fd, WORLD_STR, strlen(WORLD_STR), 0);
- if (res != strlen(WORLD_STR)) {
- fprintf(stderr, "unexpected send(2) result %zi\n", res);
- exit(EXIT_FAILURE);
- }
+ send_buf(fd, WORLD_STR, strlen(WORLD_STR), 0, strlen(WORLD_STR));
control_writeln("SEND1");
/* Peer reads merged skbuff packet. */
@@ -1116,8 +1003,8 @@ static void test_stream_virtio_skb_merge_client(const struct test_opts *opts)
static void test_stream_virtio_skb_merge_server(const struct test_opts *opts)
{
+ size_t read = 0, to_read;
unsigned char buf[64];
- ssize_t res;
int fd;
fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
@@ -1129,26 +1016,21 @@ static void test_stream_virtio_skb_merge_server(const struct test_opts *opts)
control_expectln("SEND0");
/* Read skbuff partially. */
- res = recv(fd, buf, 2, 0);
- if (res != 2) {
- fprintf(stderr, "expected recv(2) returns 2 bytes, got %zi\n", res);
- exit(EXIT_FAILURE);
- }
+ to_read = 2;
+ recv_buf(fd, buf + read, to_read, 0, to_read);
+ read += to_read;
control_writeln("REPLY0");
control_expectln("SEND1");
- res = recv(fd, buf + 2, sizeof(buf) - 2, 0);
- if (res != 8) {
- fprintf(stderr, "expected recv(2) returns 8 bytes, got %zi\n", res);
- exit(EXIT_FAILURE);
- }
+ /* Read the rest of both buffers */
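+ /* (HELLO_STR WORLD_STR relies on C string-literal concatenation.) */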
+ to_read = strlen(HELLO_STR WORLD_STR) - read;
+ recv_buf(fd, buf + read, to_read, 0, to_read);
+ read += to_read;
- res = recv(fd, buf, sizeof(buf) - 8 - 2, MSG_DONTWAIT);
- if (res != -1) {
- fprintf(stderr, "expected recv(2) failure, got %zi\n", res);
- exit(EXIT_FAILURE);
- }
+ /* No more bytes should be available */
+ to_read = sizeof(buf) - read;
+ recv_buf(fd, buf + read, to_read, MSG_DONTWAIT, -EAGAIN);
if (memcmp(buf, HELLO_STR WORLD_STR, strlen(HELLO_STR WORLD_STR))) {
fprintf(stderr, "pattern mismatch\n");
@@ -1170,6 +1052,133 @@ static void test_seqpacket_msg_peek_server(const struct test_opts *opts)
return test_msg_peek_server(opts, true);
}
+static volatile sig_atomic_t have_sigpipe;
+
+static void sigpipe(int signo)
+{
+ have_sigpipe = 1;
+}
+
+static void test_stream_check_sigpipe(int fd)
+{
+ ssize_t res;
+
+ have_sigpipe = 0;
+
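+ /* A plain send(2) must fail here (typically with EPIPE) and raise SIGPIPE. */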
+ res = send(fd, "A", 1, 0);
+ if (res != -1) {
+ fprintf(stderr, "expected send(2) failure, got %zi\n", res);
+ exit(EXIT_FAILURE);
+ }
+
+ if (!have_sigpipe) {
+ fprintf(stderr, "SIGPIPE expected\n");
+ exit(EXIT_FAILURE);
+ }
+
+ have_sigpipe = 0;
+
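+ /* MSG_NOSIGNAL suppresses SIGPIPE; the send must still fail. */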
+ res = send(fd, "A", 1, MSG_NOSIGNAL);
+ if (res != -1) {
+ fprintf(stderr, "expected send(2) failure, got %zi\n", res);
+ exit(EXIT_FAILURE);
+ }
+
+ if (have_sigpipe) {
+ fprintf(stderr, "SIGPIPE not expected\n");
+ exit(EXIT_FAILURE);
+ }
+}
+
+static void test_stream_shutwr_client(const struct test_opts *opts)
+{
+ int fd;
+
+ struct sigaction act = {
+ .sa_handler = sigpipe,
+ };
+
+ sigaction(SIGPIPE, &act, NULL);
+
+ fd = vsock_stream_connect(opts->peer_cid, 1234);
+ if (fd < 0) {
+ perror("connect");
+ exit(EXIT_FAILURE);
+ }
+
+ if (shutdown(fd, SHUT_WR)) {
+ perror("shutdown");
+ exit(EXIT_FAILURE);
+ }
+
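+ /* The write side is shut down locally, so sends must now fail. */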
+ test_stream_check_sigpipe(fd);
+
+ control_writeln("CLIENTDONE");
+
+ close(fd);
+}
+
+static void test_stream_shutwr_server(const struct test_opts *opts)
+{
+ int fd;
+
+ fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ if (fd < 0) {
+ perror("accept");
+ exit(EXIT_FAILURE);
+ }
+
+ control_expectln("CLIENTDONE");
+
+ close(fd);
+}
+
+static void test_stream_shutrd_client(const struct test_opts *opts)
+{
+ int fd;
+
+ struct sigaction act = {
+ .sa_handler = sigpipe,
+ };
+
+ sigaction(SIGPIPE, &act, NULL);
+
+ fd = vsock_stream_connect(opts->peer_cid, 1234);
+ if (fd < 0) {
+ perror("connect");
+ exit(EXIT_FAILURE);
+ }
+
+ control_expectln("SHUTRDDONE");
+
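+ /* The peer has shut down its read side, so our sends must fail. */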
+ test_stream_check_sigpipe(fd);
+
+ control_writeln("CLIENTDONE");
+
+ close(fd);
+}
+
+static void test_stream_shutrd_server(const struct test_opts *opts)
+{
+ int fd;
+
+ fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ if (fd < 0) {
+ perror("accept");
+ exit(EXIT_FAILURE);
+ }
+
+ if (shutdown(fd, SHUT_RD)) {
+ perror("shutdown");
+ exit(EXIT_FAILURE);
+ }
+
+ control_writeln("SHUTRDDONE");
+ control_expectln("CLIENTDONE");
+
+ close(fd);
+}
+
static struct test_case test_cases[] = {
{
.name = "SOCK_STREAM connection reset",
@@ -1250,6 +1259,16 @@ static struct test_case test_cases[] = {
.run_client = test_seqpacket_msg_peek_client,
.run_server = test_seqpacket_msg_peek_server,
},
+ {
+ .name = "SOCK_STREAM SHUT_WR",
+ .run_client = test_stream_shutwr_client,
+ .run_server = test_stream_shutwr_server,
+ },
+ {
+ .name = "SOCK_STREAM SHUT_RD",
+ .run_client = test_stream_shutrd_client,
+ .run_server = test_stream_shutrd_server,
+ },
{},
};